diff --git a/.dockerignore b/.dockerignore index 39b31bd2c..567609b12 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1 @@ -build/libgit2/ +build/ diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 000000000..f8796c21f --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,40 @@ +version: 2 + +updates: + - package-ecosystem: "gomod" + directory: "/" + labels: ["dependencies"] + schedule: + interval: "monthly" + groups: + go-deps: + patterns: + - "*" + allow: + - dependency-type: "direct" + ignore: + # Cloud SDK are updated manually + - dependency-name: "cloud.google.com/*" + - dependency-name: "github.com/Azure/azure-sdk-for-go/*" + # Kubernetes deps are updated by fluxcd/pkg/runtime + - dependency-name: "k8s.io/*" + - dependency-name: "sigs.k8s.io/*" + - dependency-name: "github.com/go-logr/*" + # OCI deps are updated by fluxcd/pkg/oci + - dependency-name: "github.com/docker/*" + - dependency-name: "github.com/distribution/*" + - dependency-name: "github.com/google/go-containerregistry*" + - dependency-name: "github.com/opencontainers/*" + # Helm deps are updated by fluxcd/pkg/helmtestserver + - dependency-name: "helm.sh/helm/*" + # Flux APIs are updated at release time + - dependency-name: "github.com/fluxcd/source-controller/api" + - package-ecosystem: "github-actions" + directory: "/" + labels: ["area/ci", "dependencies"] + groups: + ci: + patterns: + - "*" + schedule: + interval: "monthly" diff --git a/.github/labels.yaml b/.github/labels.yaml new file mode 100644 index 000000000..2f3e1d525 --- /dev/null +++ b/.github/labels.yaml @@ -0,0 +1,42 @@ +# Configuration file to declaratively configure labels +# Ref: https://github.com/EndBug/label-sync#Config-files + +- name: area/bucket + description: Bucket related issues and pull requests + color: '#00b140' +- name: area/git + description: Git related issues 
and pull requests + color: '#863faf' +- name: area/helm + description: Helm related issues and pull requests + color: '#1673b6' +- name: area/oci + description: OCI related issues and pull requests + color: '#c739ff' +- name: area/storage + description: Storage related issues and pull requests + color: '#4b0082' +- name: backport:release/v1.0.x + description: To be backported to release/v1.0.x + color: '#ffd700' +- name: backport:release/v1.1.x + description: To be backported to release/v1.1.x + color: '#ffd700' +- name: backport:release/v1.2.x + description: To be backported to release/v1.2.x + color: '#ffd700' +- name: backport:release/v1.3.x + description: To be backported to release/v1.3.x + color: '#ffd700' +- name: backport:release/v1.4.x + description: To be backported to release/v1.4.x + color: '#ffd700' +- name: backport:release/v1.5.x + description: To be backported to release/v1.5.x + color: '#ffd700' +- name: backport:release/v1.6.x + description: To be backported to release/v1.6.x + color: '#ffd700' +- name: backport:release/v1.7.x + description: To be backported to release/v1.7.x + color: '#ffd700' diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml new file mode 100644 index 000000000..108e3e2bb --- /dev/null +++ b/.github/workflows/backport.yaml @@ -0,0 +1,12 @@ +name: backport +on: + pull_request_target: + types: [closed, labeled] +jobs: + backport: + permissions: + contents: write # for reading and creating branches. + pull-requests: write # for creating pull requests against release branches. 
+ uses: fluxcd/gha-workflows/.github/workflows/backport.yaml@v0.3.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cifuzz.yaml b/.github/workflows/cifuzz.yaml index d13d0a241..c25086ad1 100644 --- a/.github/workflows/cifuzz.yaml +++ b/.github/workflows/cifuzz.yaml @@ -1,28 +1,20 @@ -name: CIFuzz +name: fuzz on: pull_request: branches: - - main - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - -permissions: - contents: read - + - 'main' + - 'release/**' jobs: - Fuzzing: + smoketest: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Restore Go cache - uses: actions/cache@v3 - with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Smoke test Fuzzers - run: make fuzz-smoketest + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0 + with: + go-version: 1.25.x + - name: Smoke test Fuzzers + run: make fuzz-smoketest + env: + SKIP_COSIGN_VERIFICATION: true diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index bfce099be..465bb8f42 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -1,89 +1,35 @@ name: e2e - on: + workflow_dispatch: pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' push: branches: - - main - -permissions: - contents: read # for actions/checkout to fetch code - + - 'main' + - 'release/**' jobs: - kind-linux-amd64: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. 
steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0 with: - go-version: 1.18.x - - name: Restore Go cache - uses: actions/cache@v3 - with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- + go-version: 1.25.x + - name: Verify + run: make verify - name: Enable integration tests - # Only run integration tests for main branch - if: github.ref == 'refs/heads/main' + # Only run integration tests for main and release branches + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') run: | echo 'GO_TAGS=integration' >> $GITHUB_ENV - - name: Setup Kubernetes - uses: engineerd/setup-kind@v0.5.0 - with: - version: v0.11.1 - image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Setup Helm - uses: fluxcd/pkg/actions/helm@main - name: Run E2E tests env: + SKIP_COSIGN_VERIFICATION: true CREATE_CLUSTER: false run: make e2e - - kind-linux-arm64: - # Hosted on Equinix - # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners - runs-on: [self-hosted, Linux, ARM64, equinix] - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: 1.18.x - - name: Enable integration tests - # Only run integration tests for main branch - if: github.ref == 'refs/heads/main' - run: | - echo 'GO_TAGS=integration' >> $GITHUB_ENV - - name: Prepare - id: prep - run: | - echo ::set-output name=CLUSTER::arm64-${GITHUB_SHA:0:7}-$(date +%s) - echo ::set-output name=CONTEXT::kind-arm64-${GITHUB_SHA:0:7}-$(date +%s) - - name: Setup Kubernetes Kind - run: | - kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} 
--kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }} - - name: Run e2e tests - env: - KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }} - KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }} - CREATE_CLUSTER: false - BUILD_PLATFORM: linux/arm64 - MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64 - run: make e2e - - name: Cleanup + - name: Print controller logs if: always() + continue-on-error: true run: | - kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }} - rm /tmp/${{ steps.prep.outputs.CLUSTER }} + kubectl -n source-system logs -l app=source-controller diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml deleted file mode 100644 index 74180547f..000000000 --- a/.github/workflows/nightly.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: nightly -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -env: - REPOSITORY: ${{ github.repository }} - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Setup QEMU - uses: docker/setup-qemu-action@v2 - with: - platforms: all - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v2 - with: - buildkitd-flags: "--debug" - - name: Build multi-arch container image - uses: docker/build-push-action@v3 - with: - push: false - builder: ${{ steps.buildx.outputs.name }} - context: . 
- file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: | - ${{ env.REPOSITORY }}:nightly diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 95f9d0412..ffb1c3cd9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,92 +9,58 @@ on: description: 'image tag prefix' default: 'rc' required: true - -permissions: - contents: write # needed to write releases - id-token: write # needed for keyless signing - packages: write # needed for ghcr access - -env: - CONTROLLER: ${{ github.event.repository.name }} - jobs: - build-push: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Prepare - id: prep - run: | - VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}" - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\//} - fi - echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=VERSION::${VERSION} - - name: Setup QEMU - uses: docker/setup-qemu-action@v2 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v2 - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: fluxcdbot - password: ${{ secrets.GHCR_TOKEN }} - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: fluxcdbot - password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - - name: Generate images meta - id: meta - uses: docker/metadata-action@v4 - with: - images: | - fluxcd/${{ env.CONTROLLER }} - ghcr.io/fluxcd/${{ env.CONTROLLER }} - tags: | - type=raw,value=${{ steps.prep.outputs.VERSION }} - - name: Publish images - uses: docker/build-push-action@v3 - with: - push: true - builder: ${{ steps.buildx.outputs.name }} - context: . 
- file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - name: Check images - run: | - docker buildx imagetools inspect docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - docker buildx imagetools inspect ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - docker pull docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - docker pull ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - - uses: sigstore/cosign-installer@main - - name: Sign images - env: - COSIGN_EXPERIMENTAL: 1 - run: | - cosign sign fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - cosign sign ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }} - - name: Generate release artifacts - if: startsWith(github.ref, 'refs/tags/v') - run: | - mkdir -p config/release - kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml - kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml - echo '[CHANGELOG](https://github.com/fluxcd/${{ env.CONTROLLER }}/blob/main/CHANGELOG.md)' > ./config/release/notes.md - - uses: anchore/sbom-action/download-syft@v0 - - name: Create release and SBOM - if: startsWith(github.ref, 'refs/tags/v') - uses: goreleaser/goreleaser-action@v3 - with: - version: latest - args: release --release-notes=config/release/notes.md --rm-dist --skip-validate - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + release: + permissions: + contents: write # for creating the GitHub release. + id-token: write # for creating OIDC tokens for signing. + packages: write # for pushing and signing container images. 
+ uses: fluxcd/gha-workflows/.github/workflows/controller-release.yaml@v0.3.0 + with: + controller: ${{ github.event.repository.name }} + release-candidate-prefix: ${{ github.event.inputs.tag }} + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + dockerhub-token: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + release-provenance: + needs: [release] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + contents: write # for uploading attestations to GitHub releases. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 + with: + provenance-name: "provenance.intoto.jsonl" + base64-subjects: "${{ needs.release.outputs.hashes }}" + upload-assets: true + dockerhub-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ${{ needs.release.outputs.image_url }} + digest: ${{ needs.release.outputs.image_digest }} + registry-username: fluxcdbot + secrets: + registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + ghcr-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. 
+ if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ghcr.io/${{ needs.release.outputs.image_url }} + digest: ${{ needs.release.outputs.image_digest }} + registry-username: fluxcdbot + secrets: + registry-password: ${{ secrets.GHCR_TOKEN }} diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 332baa79d..4d7f2b0f5 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -1,5 +1,4 @@ -name: Scan - +name: scan on: push: branches: [ main ] @@ -7,39 +6,12 @@ on: branches: [ main ] schedule: - cron: '18 10 * * 3' - -permissions: - contents: read # for actions/checkout to fetch code - security-events: write # for codeQL to write security events - jobs: - fossa: - name: FOSSA - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Run FOSSA scan and upload build data - uses: fossa-contrib/fossa-action@v1 - with: - # FOSSA Push-Only API Token - fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de - github-token: ${{ github.token }} - - codeql: - name: CodeQL - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.18 - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: go - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + analyze: + permissions: + contents: read # for reading the repository code. + security-events: write # for uploading the CodeQL analysis results. 
+ uses: fluxcd/gha-workflows/.github/workflows/code-scan.yaml@v0.3.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + fossa-token: ${{ secrets.FOSSA_TOKEN }} diff --git a/.github/workflows/sync-labels.yaml b/.github/workflows/sync-labels.yaml new file mode 100644 index 000000000..cc69156a8 --- /dev/null +++ b/.github/workflows/sync-labels.yaml @@ -0,0 +1,16 @@ +name: sync-labels +on: + workflow_dispatch: + push: + branches: + - main + paths: + - .github/labels.yaml +jobs: + sync-labels: + permissions: + contents: read # for reading the labels file. + issues: write # for creating and updating labels. + uses: fluxcd/gha-workflows/.github/workflows/labels-sync.yaml@v0.3.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..4ba71463f --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,22 @@ +name: test +on: + workflow_dispatch: + pull_request: + push: + branches: + - 'main' + - 'release/**' +jobs: + test-linux-amd64: + runs-on: ubuntu-latest + steps: + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0 + with: + go-version: 1.25.x + - name: Run tests + env: + SKIP_COSIGN_VERIFICATION: true + TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} + TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} + run: make test diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml deleted file mode 100644 index 7bb907cf6..000000000 --- a/.github/workflows/tests.yaml +++ /dev/null @@ -1,89 +0,0 @@ -name: tests - -on: - pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - - push: - branches: - - main - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - - test-linux-amd64: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: 
1.18.x - - name: Restore Go cache - uses: actions/cache@v3 - with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Run tests - env: - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - run: make test - - test-linux-arm64: - # Hosted on Equinix - # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners - runs-on: [self-hosted, Linux, ARM64, equinix] - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: 1.18.x - - name: Run tests - env: - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - - # Temporarily disabling -race for arm64 as our GitHub action - # runners don't seem to like it. The race detection was tested - # on both Apple M1 and Linux arm64 with successful results. - # - # We should reenable go test -race for arm64 runners once the - # current issue is resolved. - GO_TEST_ARGS: '' - run: make test - - # Runs 'make test' on MacOS to ensure the continuous support for contributors - # using it as a development environment. 
- darwin-amd64: - strategy: - matrix: - os: [macos-11, macos-12] - fail-fast: false - runs-on: ${{ matrix.os }} - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: 1.18.x - - name: Restore Go cache - uses: actions/cache@v3 - with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Run tests - run: make test diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml deleted file mode 100644 index 33210245f..000000000 --- a/.github/workflows/verify.yaml +++ /dev/null @@ -1,36 +0,0 @@ -name: verify - -on: - pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - - push: - branches: - - main - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - - verify-linux-amd64: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: 1.18.x - - name: Restore Go cache - uses: actions/cache@v3 - with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Verify - run: make verify diff --git a/.goreleaser.yaml b/.goreleaser.yaml index f1074d546..7b61ce0c1 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -4,9 +4,26 @@ builds: - skip: true release: - prerelease: "true" extra_files: - glob: config/release/*.yaml + prerelease: "auto" + header: | + ## Changelog + + [{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md) + footer: | + ## Container images + + - `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}` + - `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}` + + Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`. 
+ + The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC. + To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/). + +changelog: + disable: true checksum: extra_files: @@ -32,6 +49,7 @@ signs: certificate: "${artifact}.pem" args: - sign-blob + - "--yes" - "--output-certificate=${certificate}" - "--output-signature=${signature}" - "${artifact}" diff --git a/ATTRIBUTIONS.md b/ATTRIBUTIONS.md deleted file mode 100644 index 696ab9fa4..000000000 --- a/ATTRIBUTIONS.md +++ /dev/null @@ -1,1201 +0,0 @@ -# Attributions - -This application uses Open Source components. You can find the source -code of their open source projects along with license information below. -We acknowledge and are grateful to these developers for their contributions -to open source. - -## libgit2 - -Libgit2 was obtained in source-code form from its github repository: -https://github.com/libgit2/libgit2/ - -No changes were made to its original source code. - -Copyright notice (https://raw.githubusercontent.com/libgit2/libgit2/main/COPYING): - - libgit2 is Copyright (C) the libgit2 contributors, - unless otherwise stated. See the AUTHORS file for details. - - Note that the only valid version of the GPL as far as this project - is concerned is _this_ particular version of the license (ie v2, not - v2.2 or v3.x or whatever), unless explicitly otherwise stated. - ----------------------------------------------------------------------- - - LINKING EXCEPTION - - In addition to the permissions in the GNU General Public License, - the authors give you unlimited permission to link the compiled - version of this library into combinations with other programs, - and to distribute those combinations without any restriction - coming from the use of this file. 
(The General Public License - restrictions do apply in other respects; for example, they cover - modification of the file, and distribution when not linked into - a combined executable.) - ----------------------------------------------------------------------- - - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. 
You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. 
The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. 
(Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. 
- ----------------------------------------------------------------------- - -The bundled ZLib code is licensed under the ZLib license: - -Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - ----------------------------------------------------------------------- - -The Clar framework is licensed under the ISC license: - -Copyright (c) 2011-2015 Vicent Marti - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- ----------------------------------------------------------------------- - -The bundled PCRE implementation (deps/pcre/) is licensed under the BSD -license. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of the University of Cambridge nor the name of Google - Inc. nor the names of their contributors may be used to endorse or - promote products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- - -The bundled winhttp definition files (deps/winhttp/) are licensed under -the GNU LGPL (available at the end of this file). 
- -Copyright (C) 2007 Francois Gouget - -This library is free software; you can redistribute it and/or -modify it under the terms of the GNU Lesser General Public -License as published by the Free Software Foundation; either -version 2.1 of the License, or (at your option) any later version. - -This library is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with this library; if not, write to the Free Software -Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - ----------------------------------------------------------------------- - - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. 
You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. 
- - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. 
In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. 
A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. 
You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. 
But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. 
You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. 
- - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. 
(It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. 
You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. 
Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. 
Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. 
Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. 
It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! - ----------------------------------------------------------------------- - -The bundled SHA1 collision detection code is licensed under the MIT license: - -MIT License - -Copyright (c) 2017: - Marc Stevens - Cryptology Group - Centrum Wiskunde & Informatica - P.O. 
Box 94079, 1090 GB Amsterdam, Netherlands - marc@marc-stevens.nl - - Dan Shumow - Microsoft Research - danshu@microsoft.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ----------------------------------------------------------------------- - -The bundled wildmatch code is licensed under the BSD license: - -Copyright Rich Salz. -All rights reserved. - -Redistribution and use in any form are permitted provided that the -following restrictions are are met: - -1. Source distributions must retain this entire copyright notice - and comment. -2. Binary distributions must include the acknowledgement ``This - product includes software developed by Rich Salz'' in the - documentation or other materials provided with the - distribution. This must not be represented as an endorsement - or promotion without specific prior written permission. -3. The origin of this software must not be misrepresented, either - by explicit claim or by omission. Credits must appear in the - source and documentation. 
-4. Altered versions must be plainly marked as such in the source - and documentation and must not be misrepresented as being the - original software. - -THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. - ----------------------------------------------------------------------- - -Portions of the OpenSSL headers are included under the OpenSSL license: - -Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) -All rights reserved. - -This package is an SSL implementation written -by Eric Young (eay@cryptsoft.com). -The implementation was written so as to conform with Netscapes SSL. - -This library is free for commercial and non-commercial use as long as -the following conditions are aheared to. The following conditions -apply to all code found in this distribution, be it the RC4, RSA, -lhash, DES, etc., code; not just the SSL code. The SSL documentation -included with this distribution is covered by the same copyright terms -except that the holder is Tim Hudson (tjh@cryptsoft.com). - -Copyright remains Eric Young's, and as such any Copyright notices in -the code are not to be removed. -If this package is used in a product, Eric Young should be given attribution -as the author of the parts of the library used. -This can be in the form of a textual message at program startup or -in documentation (online or textual) provided with the package. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. 
All advertising materials mentioning features or use of this software - must display the following acknowledgement: - "This product includes cryptographic software written by - Eric Young (eay@cryptsoft.com)" - The word 'cryptographic' can be left out if the rouines from the library - being used are not cryptographic related :-). -4. If you include any Windows specific code (or a derivative thereof) from - the apps directory (application code) you must include an acknowledgement: - "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - -THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - -The licence and distribution terms for any publically available version or -derivative of this code cannot be changed. i.e. this code cannot simply be -copied and put under another distribution licence -[including the GNU Public Licence.] - -==================================================================== -Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -3. All advertising materials mentioning features or use of this - software must display the following acknowledgment: - "This product includes software developed by the OpenSSL Project - for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - -4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - endorse or promote products derived from this software without - prior written permission. For written permission, please contact - openssl-core@openssl.org. - -5. Products derived from this software may not be called "OpenSSL" - nor may "OpenSSL" appear in their names without prior written - permission of the OpenSSL Project. - -6. Redistributions of any form whatsoever must retain the following - acknowledgment: - "This product includes software developed by the OpenSSL Project - for use in the OpenSSL Toolkit (http://www.openssl.org/)" - -THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY -EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR -ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- - -The xoroshiro256** implementation is licensed in the public domain: - -Written in 2018 by David Blackman and Sebastiano Vigna (vigna@acm.org) - -To the extent possible under law, the author has dedicated all copyright -and related and neighboring rights to this software to the public domain -worldwide. This software is distributed without any warranty. - -See . - ----------------------------------------------------------------------- - -The built-in SHA256 support (src/hash/rfc6234) is taken from RFC 6234 -under the following license: - -Copyright (c) 2011 IETF Trust and the persons identified as -authors of the code. All rights reserved. - -Redistribution and use in source and binary forms, with or -without modification, are permitted provided that the following -conditions are met: - -- Redistributions of source code must retain the above - copyright notice, this list of conditions and - the following disclaimer. - -- Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -- Neither the name of Internet Society, IETF or IETF Trust, nor - the names of specific contributors, may be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND -CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/CHANGELOG.md b/CHANGELOG.md index a16e34ba9..74cb010a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,1410 @@ All notable changes to this project are documented in this file. +## 1.7.0 + +**Release date:** 2025-09-15 + +This minor release comes with new features, improvements and bug fixes. + +### ExternalArtifact + +A new [ExternalArtifact](https://github.com/fluxcd/source-controller/blob/main/docs/spec/v1/externalartifacts.md) API has been added to the `source.toolkit.fluxcd.io` group. This API enables advanced source composition and decomposition patterns implemented by the [source-watcher](https://github.com/fluxcd/source-watcher) controller. + +### GitRepository + +GitRepository controller now includes fixes for stalling issues and improved error handling. Multi-tenant workload identity support has been added for Azure repositories when the `ObjectLevelWorkloadIdentity` feature gate is enabled. TLS configuration support has been added for GitHub App authentication. + +### Bucket + +Bucket controller now supports multi-tenant workload identity for AWS, Azure and GCP providers when the `ObjectLevelWorkloadIdentity` feature gate is enabled. A default service account flag has been added for lockdown scenarios. 
+ +### General updates + +The controller now supports system certificate pools for improved CA compatibility, and TLS ServerName pinning has been removed from TLS configuration for better flexibility. A `--default-service-account=` flag was introduced for workload identity multi-tenancy lockdown. + +In addition, the Kubernetes dependencies have been updated to v1.34, Helm +has been updated to v3.19 and various other controller dependencies have +been updated to their latest version. The controller is now built with +Go 1.25. + +Fixes: +- Fix GitRepository controller stalling when it shouldn't + [#1865](https://github.com/fluxcd/source-controller/pull/1865) + +Improvements: +- [RFC-0010] Add multi-tenant workload identity support for GCP Bucket + [#1862](https://github.com/fluxcd/source-controller/pull/1862) +- [RFC-0010] Add multi-tenant workload identity support for AWS Bucket + [#1868](https://github.com/fluxcd/source-controller/pull/1868) +- [RFC-0010] Add multi-tenant workload identity support for Azure GitRepository + [#1871](https://github.com/fluxcd/source-controller/pull/1871) +- [RFC-0010] Add default-service-account for lockdown + [#1872](https://github.com/fluxcd/source-controller/pull/1872) +- [RFC-0010] Add multi-tenant workload identity support for Azure Blob Storage + [#1875](https://github.com/fluxcd/source-controller/pull/1875) +- [RFC-0012] Add ExternalArtifact API documentation + [#1881](https://github.com/fluxcd/source-controller/pull/1881) +- [RFC-0012] Refactor controller to use `fluxcd/pkg/artifact` + [#1883](https://github.com/fluxcd/source-controller/pull/1883) +- Migrate OCIRepository controller to runtime/secrets + [#1851](https://github.com/fluxcd/source-controller/pull/1851) +- Migrate Bucket controller to runtime/secrets + [#1852](https://github.com/fluxcd/source-controller/pull/1852) +- Add TLS 
config for GitHub App authentication + [#1860](https://github.com/fluxcd/source-controller/pull/1860) +- Remove ServerName pinning from TLS config + [#1870](https://github.com/fluxcd/source-controller/pull/1870) +- Extract storage operations to a dedicated package + [#1864](https://github.com/fluxcd/source-controller/pull/1864) +- Remove deprecated APIs in group `source.toolkit.fluxcd.io/v1beta1` + [#1861](https://github.com/fluxcd/source-controller/pull/1861) +- Migrate tests from gotest to gomega + [#1876](https://github.com/fluxcd/source-controller/pull/1876) +- Update dependencies + [#1888](https://github.com/fluxcd/source-controller/pull/1888) + [#1880](https://github.com/fluxcd/source-controller/pull/1880) + [#1878](https://github.com/fluxcd/source-controller/pull/1878) + [#1876](https://github.com/fluxcd/source-controller/pull/1876) + [#1874](https://github.com/fluxcd/source-controller/pull/1874) + [#1850](https://github.com/fluxcd/source-controller/pull/1850) + [#1844](https://github.com/fluxcd/source-controller/pull/1844) + +## 1.6.2 + +**Release date:** 2025-06-27 + +This patch release comes with a fix for `rsa-sha2-512` and `rsa-sha2-256` algorithms +not being prioritized for `ssh-rsa` host keys. + +Fixes: +- Fix: Prioritize sha2-512 and sha2-256 for ssh-rsa host keys + [#1839](https://github.com/fluxcd/source-controller/pull/1839) + +## 1.6.1 + +**Release date:** 2025-06-13 + +This patch release comes with a fix for the `knownhosts: key mismatch` +error in the `GitRepository` API when using SSH authentication, and +a fix for authentication with +[public ECR repositories](https://fluxcd.io/flux/integrations/aws/#for-amazon-public-elastic-container-registry) +in the `OCIRepository` API. 
+ +Fix: +- Fix authentication for public ECR + [#1825](https://github.com/fluxcd/source-controller/pull/1825) +- Fix `knownhosts key mismatch` regression bug + [#1829](https://github.com/fluxcd/source-controller/pull/1829) + +## 1.6.0 + +**Release date:** 2025-05-27 + +This minor release promotes the OCIRepository API to GA, and comes with new features, +improvements and bug fixes. + +### OCIRepository + +The `OCIRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +OCIRepository API now supports object-level workload identity by setting +`.spec.provider` to one of `aws`, `azure`, or `gcp`, and setting +`.spec.serviceAccountName` to the name of a service account in the same +namespace that has been configured with appropriate cloud permissions. +For this feature to work, the controller feature gate +`ObjectLevelWorkloadIdentity` must be enabled. See a complete guide +[here](https://fluxcd.io/flux/integrations/). + +OCIRepository API now caches registry credentials for cloud providers +by default. This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### GitRepository + +GitRepository API now supports sparse checkout by setting a list +of directories in the `.spec.sparseCheckout` field. This allows +for optimizing the amount of data fetched from the Git repository. + +GitRepository API now supports mTLS authentication for HTTPS Git repositories +by setting the fields `tls.crt`, `tls.key`, and `ca.crt` in the `.data` field +of the referenced Secret in `.spec.secretRef`. + +GitRepository API now caches credentials for non-`generic` providers by default. 
+This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### General updates + +In addition, the Kubernetes dependencies have been updated to v1.33 and +various other controller dependencies have been updated to their latest +version. The controller is now built with Go 1.24. + +Fixes: +- Downgrade `Masterminds/semver` to v3.3.0 + [#1785](https://github.com/fluxcd/source-controller/pull/1785) + +Improvements: +- Promote OCIRepository API to v1 (GA) + [#1794](https://github.com/fluxcd/source-controller/pull/1794) +- [RFC-0010] Introduce object-level workload identity for container registry APIs and cache credentials + [#1790](https://github.com/fluxcd/source-controller/pull/1790) + [#1802](https://github.com/fluxcd/source-controller/pull/1802) + [#1811](https://github.com/fluxcd/source-controller/pull/1811) +- Implement Sparse Checkout for `GitRepository` + [#1774](https://github.com/fluxcd/source-controller/pull/1774) +- Add Mutual TLS support to `GitRepository` + [#1778](https://github.com/fluxcd/source-controller/pull/1778) +- Introduce token cache for `GitRepository` + [#1745](https://github.com/fluxcd/source-controller/pull/1745) + [#1788](https://github.com/fluxcd/source-controller/pull/1788) + [#1789](https://github.com/fluxcd/source-controller/pull/1789) +- Build controller without CGO + [#1725](https://github.com/fluxcd/source-controller/pull/1725) +- Various dependency updates + [#1812](https://github.com/fluxcd/source-controller/pull/1812) + [#1800](https://github.com/fluxcd/source-controller/pull/1800) + 
[#1810](https://github.com/fluxcd/source-controller/pull/1810) + [#1806](https://github.com/fluxcd/source-controller/pull/1806) + [#1782](https://github.com/fluxcd/source-controller/pull/1782) + [#1783](https://github.com/fluxcd/source-controller/pull/1783) + [#1775](https://github.com/fluxcd/source-controller/pull/1775) + [#1728](https://github.com/fluxcd/source-controller/pull/1728) + [#1722](https://github.com/fluxcd/source-controller/pull/1722) + +## 1.5.0 + +**Release date:** 2025-02-13 + +This minor release comes with various bug fixes and improvements. + +### GitRepository + +The GitRepository API now supports authenticating through GitHub App +for GitHub repositories. See +[docs](https://fluxcd.io/flux/components/source/gitrepositories/#github). + +In addition, the Kubernetes dependencies have been updated to v1.32.1, Helm has +been updated to v3.17.0 and various other controller dependencies have been +updated to their latest version. + +Fixes: +- Remove deprecated object metrics from controllers + [#1686](https://github.com/fluxcd/source-controller/pull/1686) + +Improvements: +- [RFC-007] Implement GitHub app authentication for git repositories. 
+ [#1647](https://github.com/fluxcd/source-controller/pull/1647) +- Various dependency updates + [#1684](https://github.com/fluxcd/source-controller/pull/1684) + [#1689](https://github.com/fluxcd/source-controller/pull/1689) + [#1693](https://github.com/fluxcd/source-controller/pull/1693) + [#1705](https://github.com/fluxcd/source-controller/pull/1705) + [#1708](https://github.com/fluxcd/source-controller/pull/1708) + [#1709](https://github.com/fluxcd/source-controller/pull/1709) + [#1713](https://github.com/fluxcd/source-controller/pull/1713) + [#1716](https://github.com/fluxcd/source-controller/pull/1716) + +## 1.4.1 + +**Release date:** 2024-09-26 + +This patch release comes with a fix to the `GitRepository` API to keep it +backwards compatible by removing the default value for `.spec.provider` field +when not set in the API. The controller will internally consider an empty value +for the provider as the `generic` provider. + +Fix: +- GitRepo: Remove provider default value from API + [#1626](https://github.com/fluxcd/source-controller/pull/1626) + +## 1.4.0 + +**Release date:** 2024-09-25 + +This minor release promotes the Bucket API to GA, and comes with new features, +improvements and bug fixes. + +### Bucket + +The `Bucket` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +Bucket API now supports proxy through the field `.spec.proxySecretRef` and custom TLS client certificate and CA through the field `.spec.certSecretRef`. + +Bucket API now also supports specifying a custom STS configuration through the field `.spec.sts`. This is currently only supported for the providers `generic` and `aws`. When specifying a custom STS configuration one must specify which STS provider to use. 
For the `generic` bucket provider we support the `ldap` STS provider, and for the `aws` bucket provider we support the `aws` STS provider. For the `aws` STS provider, one may use the default main STS endpoint, or the regional STS endpoints, or even an interface endpoint. + +### OCIRepository + +OCIRepository API now supports proxy through the field `.spec.proxySecretRef`. + +**Warning**: Proxy is not supported for cosign keyless verification. + +### GitRepository + +GitRepository API now supports OIDC authentication for Azure DevOps repositories through the field `.spec.provider` using the value `azure`. See the docs for details [here](https://fluxcd.io/flux/components/source/gitrepositories/#provider). + +In addition, the Kubernetes dependencies have been updated to v1.31.1, Helm has +been updated to v3.16.1 and various other controller dependencies have been +updated to their latest version. The controller is now built with Go 1.23. + +Fixes: +- helm: Use the default transport pool to preserve proxy settings + [#1490](https://github.com/fluxcd/source-controller/pull/1490) +- Fix incorrect use of format strings with the conditions package. 
+ [#1529](https://github.com/fluxcd/source-controller/pull/1529) +- Fix HelmChart local dependency resolution for name-based path + [#1539](https://github.com/fluxcd/source-controller/pull/1539) +- Fix Helm index validation for Artifactory + [#1516](https://github.com/fluxcd/source-controller/pull/1516) + +Improvements: +- Promote Bucket API to v1 + [#1592](https://github.com/fluxcd/source-controller/pull/1592) +- Add .spec.certSecretRef to Bucket API + [#1475](https://github.com/fluxcd/source-controller/pull/1475) +- Run ARM64 tests on GitHub runners + [#1512](https://github.com/fluxcd/source-controller/pull/1512) +- Add support for .spec.proxySecretRef for generic provider of Bucket API + [#1500](https://github.com/fluxcd/source-controller/pull/1500) +- Improve invalid proxy error message for Bucket API + [#1550](https://github.com/fluxcd/source-controller/pull/1550) +- Add support for AWS STS endpoint in the Bucket API + [#1552](https://github.com/fluxcd/source-controller/pull/1552) +- Add proxy support for GCS buckets + [#1565](https://github.com/fluxcd/source-controller/pull/1565) +- azure-blob: Fix VisitObjects() in integration test + [#1574](https://github.com/fluxcd/source-controller/pull/1574) +- Add proxy support for Azure buckets + [#1567](https://github.com/fluxcd/source-controller/pull/1567) +- Add proxy support for AWS S3 buckets + [#1568](https://github.com/fluxcd/source-controller/pull/1568) +- Add proxy support for OCIRepository API + [#1536](https://github.com/fluxcd/source-controller/pull/1536) +- Add LDAP provider for Bucket STS API + [#1585](https://github.com/fluxcd/source-controller/pull/1585) +- Introduce Bucket provider constants with the common part as a prefix + 
[#1589](https://github.com/fluxcd/source-controller/pull/1589) +- OCIRepository: Configure proxy for OIDC auth + [#1607](https://github.com/fluxcd/source-controller/pull/1607) +- [RFC-0007] Enable Azure OIDC for Azure DevOps repositories + [#1591](https://github.com/fluxcd/source-controller/pull/1591) +- Build with Go 1.23 + [#1582](https://github.com/fluxcd/source-controller/pull/1582) +- Various dependency updates + [#1507](https://github.com/fluxcd/source-controller/pull/1507) + [#1576](https://github.com/fluxcd/source-controller/pull/1576) + [#1578](https://github.com/fluxcd/source-controller/pull/1578) + [#1579](https://github.com/fluxcd/source-controller/pull/1579) + [#1583](https://github.com/fluxcd/source-controller/pull/1583) + [#1588](https://github.com/fluxcd/source-controller/pull/1588) + [#1603](https://github.com/fluxcd/source-controller/pull/1603) + [#1610](https://github.com/fluxcd/source-controller/pull/1610) + [#1614](https://github.com/fluxcd/source-controller/pull/1614) + [#1618](https://github.com/fluxcd/source-controller/pull/1618) + +## 1.3.0 + +**Release date:** 2024-05-03 + +This minor release promotes the Helm APIs to GA, and comes with new features, +improvements and bug fixes. + +### HelmRepository + +The `HelmRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +For `HelmRepository` of type `oci`, the `.spec.insecure` field allows connecting +over HTTP to an insecure non-TLS container registry. + +To upgrade from `v1beta2`, after deploying the new CRD and controller, +set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that +contain `HelmRepository` definitions. +Bumping the API version in manifests can be done gradually. 
+It is advised not to delay this procedure as the beta versions will be removed after 6 months. + +### HelmChart + +The `HelmChart` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`, with the exception +of the removal of the deprecated field `.spec.valuesFile` which was replaced with `.spec.valuesFiles`. + +The `HelmChart` API was extended with support for +[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1/helmcharts.md#notation) +of Helm OCI charts. + +A new optional field `.spec.ignoreMissingValuesFiles` has been added, +which allows the controller to ignore missing values files rather than failing to reconcile the `HelmChart`. + +### OCIRepository + +The `OCIRepository` API was extended with support for +[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#notation) +of OCI artifacts. + +A new optional field `.spec.ref.semverFilter` has been added, +which allows the controller to filter the tags based on regular expressions +before applying the semver range. This allows +[picking the latest release candidate](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#semverfilter-example) +instead of the latest stable release. + +In addition, the controller has been updated to Kubernetes v1.30.0, +Helm v3.14.4, and various other dependencies to their latest version +to patch upstream CVEs.
+ +Improvements: +- Promote Helm APIs to `source.toolkit.fluxcd.io/v1` (GA) + [#1428](https://github.com/fluxcd/source-controller/pull/1428) +- Add `.spec.ignoreMissingValuesFiles` to HelmChart API + [#1447](https://github.com/fluxcd/source-controller/pull/1447) +- Implement `.spec.ref.semverFilter` in OCIRepository API + [#1407](https://github.com/fluxcd/source-controller/pull/1407) +- Helm: Allow insecure registry login + [#1412](https://github.com/fluxcd/source-controller/pull/1412) +- Add support for Notation verification to HelmChart and OCIRepository + [#1075](https://github.com/fluxcd/source-controller/pull/1075) +- Various dependency updates + [#1442](https://github.com/fluxcd/source-controller/pull/1442) + [#1450](https://github.com/fluxcd/source-controller/pull/1450) + [#1469](https://github.com/fluxcd/source-controller/pull/1469) + [#1378](https://github.com/fluxcd/source-controller/pull/1378) + +Fixes: +- Bind cached helm index to the maximum index size + [#1457](https://github.com/fluxcd/source-controller/pull/1457) +- Remove `genclient:Namespaced` tag + [#1386](https://github.com/fluxcd/source-controller/pull/1386) + +## 1.2.5 + +**Release date:** 2024-04-04 + +This patch release comes with improvements to the `HelmChart` name validation +and adds logging sanitization of connection error messages for `Bucket` sources.
+ +Fixes: +- Improve chart name validation + [#1377](https://github.com/fluxcd/source-controller/pull/1377) +- Sanitize URLs for bucket fetch error messages + [#1430](https://github.com/fluxcd/source-controller/pull/1430) + +Improvements: +- Update controller-gen to v0.14.0 + [#1399](https://github.com/fluxcd/source-controller/pull/1399) + +## 1.2.4 + +**Release date:** 2024-02-01 + +This patch release updates the Kubernetes dependencies to v1.28.6 and various +other dependencies to their latest version to patch upstream CVEs. + +Improvements: +- Various dependency updates + [#1362](https://github.com/fluxcd/source-controller/pull/1362) + [#1357](https://github.com/fluxcd/source-controller/pull/1357) + [#1353](https://github.com/fluxcd/source-controller/pull/1353) + [#1347](https://github.com/fluxcd/source-controller/pull/1347) + [#1343](https://github.com/fluxcd/source-controller/pull/1343) + [#1340](https://github.com/fluxcd/source-controller/pull/1340) + [#1338](https://github.com/fluxcd/source-controller/pull/1338) + [#1336](https://github.com/fluxcd/source-controller/pull/1336) + [#1334](https://github.com/fluxcd/source-controller/pull/1334) + +## 1.2.3 + +**Release date:** 2023-12-14 + +This patch release updates the controller's Helm dependency to v3.13.3. + +Improvements: +- Update Helm to v3.13.3 + [#1325](https://github.com/fluxcd/source-controller/pull/1325) +- helmrepo: Remove migration log/event + [#1324](https://github.com/fluxcd/source-controller/pull/1324) + +## 1.2.2 + +**Release date:** 2023-12-11 + +This patch release addresses an issue with AWS ECR authentication introduced in +v1.2.0. + +In addition, a variety of dependencies have been updated. Including an update +of the container base image to Alpine v3.19. 
+ +Fixes: +- Address issue with authenticating towards AWS ECR + [#1318](https://github.com/fluxcd/source-controller/pull/1318) + [#1321](https://github.com/fluxcd/source-controller/pull/1321) + +Improvements: + +- Update dependencies + [#1314](https://github.com/fluxcd/source-controller/pull/1314) + [#1318](https://github.com/fluxcd/source-controller/pull/1318) + [#1321](https://github.com/fluxcd/source-controller/pull/1321) +- build: update Alpine to 3.19 + [#1316](https://github.com/fluxcd/source-controller/pull/1316) + +## 1.2.1 + +**Release date:** 2023-12-08 + +This patch release ensures the controller is built with the latest Go `1.21.x` +release, to mitigate multiple security vulnerabilities which were published +shortly after the release of v1.2.0. + +In addition, a small number of dependencies have been updated to their latest +version. + +Improvements: +- Update dependencies + [#1309](https://github.com/fluxcd/source-controller/pull/1309) + +## 1.2.0 + +**Release date:** 2023-12-05 + +This minor release comes with API changes, bug fixes and several new features. + +### Bucket + +A new field, `.spec.prefix`, has been added to the Bucket API, which enables +server-side filtering of files if the object's `.spec.provider` is set to +`generic`/`aws`/`gcp`. + +### OCIRepository and HelmChart + +Two new fields, `.spec.verify.matchOIDCIdentity.issuer` and +`.spec.verify.matchOIDCIdentity.subject` have been added to the HelmChart and +OCIRepository APIs. If the image has been keylessly signed via Cosign, these +fields can be used to verify the OIDC issuer of the Fulcio certificate and the +OIDC identity's subject respectively. + +### HelmRepository + +A new boolean field, `.spec.insecure`, has been introduced to the HelmRepository +API, which allows connecting to a non-TLS HTTP container registry. It is only +considered if the object's `.spec.type` is set to `oci`.
+ +From this release onwards, HelmRepository objects of type OCI are treated as +static objects, i.e. they have an empty status. +Existing objects undergo a one-time automatic migration and new objects +will undergo a one-time reconciliation to remove any status fields. + +Additionally, the controller now performs a shallow clone if the +`.spec.ref.name` of the GitRepository object points to a branch or a tag. + +Furthermore, a bug has been fixed, where the controller would try to +authenticate against public OCI registries if the HelmRepository object has a +reference to a Secret containing a CA certificate. + +Lastly, dependencies have been updated to their latest version, including an +update of Kubernetes to v1.28.4. + +Fixes: +- Address miscellaneous issues throughout code base + [#1257](https://github.com/fluxcd/source-controller/pull/1257) +- helmrepo: only configure tls login option when required + [#1289](https://github.com/fluxcd/source-controller/pull/1289) +- oci: rename `OCIChartRepository.insecure` to `insecureHTTP` + [#1299](https://github.com/fluxcd/source-controller/pull/1299) +- Use bitnami Minio oci chart for e2e + [#1301](https://github.com/fluxcd/source-controller/pull/1301) + +Improvements: +- build(deps): bump Go dependencies + [#1260](https://github.com/fluxcd/source-controller/pull/1260) + [#1261](https://github.com/fluxcd/source-controller/pull/1261) + [#1269](https://github.com/fluxcd/source-controller/pull/1269) + [#1291](https://github.com/fluxcd/source-controller/pull/1291) +- build(deps): bump the ci group dependencies + [#1265](https://github.com/fluxcd/source-controller/pull/1265) + [#1266](https://github.com/fluxcd/source-controller/pull/1266) + [#1272](https://github.com/fluxcd/source-controller/pull/1272) +
[#1277](https://github.com/fluxcd/source-controller/pull/1277) + [#1281](https://github.com/fluxcd/source-controller/pull/1281) + [#1285](https://github.com/fluxcd/source-controller/pull/1285) + [#1296](https://github.com/fluxcd/source-controller/pull/1296) + [#1303](https://github.com/fluxcd/source-controller/pull/1303) +- bucket: Add prefix filtering capability + [#1228](https://github.com/fluxcd/source-controller/pull/1228) +- Static HelmRepository OCI + [#1243](https://github.com/fluxcd/source-controller/pull/1243) +- cosign: allow identity matching for keyless verification + [#1250](https://github.com/fluxcd/source-controller/pull/1250) +- Upgrade `go-git` to v5.10.0 + [#1271](https://github.com/fluxcd/source-controller/pull/1271) +- storage: change default file permissions + [#1276](https://github.com/fluxcd/source-controller/pull/1276) +- Update dependencies to Kubernetes v1.28 + [#1286](https://github.com/fluxcd/source-controller/pull/1286) +- Add `.spec.insecure` to `HelmRepository` for `type: oci` + [#1288](https://github.com/fluxcd/source-controller/pull/1288) +- Update Git dependencies + [#1300](https://github.com/fluxcd/source-controller/pull/1300) +- Update Go dependencies + [#1304](https://github.com/fluxcd/source-controller/pull/1304) + +## 1.1.2 + +**Release date:** 2023-10-11 + +This patch release fixes a bug where OCIRepository objects can't be consumed +when the OCI image layer contains symlinks. 
+ +Fixes: +- oci: Skip symlinks found in upstream artifacts + [#1246](https://github.com/fluxcd/source-controller/pull/1246/) + +Improvements: +- build(deps): bump the ci group with 1 update + [#1256](https://github.com/fluxcd/source-controller/pull/1256) + +## 1.1.1 + +**Release date:** 2023-09-18 + +This is a patch release that fixes a regression introduced in v1.1.0 where +HelmRepository objects would not be reconciled if they provided a TLS Secret +using `.spec.secretRef` with a type other than `Opaque` or `kubernetes.io/tls`. + +In addition, the URL lookup strategy for Buckets has been changed from path to +auto, to widen support for S3-compatible object storage services. + +Lastly, several dependencies have been updated to their latest versions. + +Fixes: +- bucket: use auto lookup type + [#1222](https://github.com/fluxcd/source-controller/pull/1222) +- helmrepo: fix Secret type check for TLS via `.spec.secretRef` + [#1225](https://github.com/fluxcd/source-controller/pull/1225) +- Upgrade github.com/fluxcd/pkg/{git,git/gogit} + [#1236](https://github.com/fluxcd/source-controller/pull/1236) + +Improvements: +- build(deps): bump the ci group dependencies + [#1213](https://github.com/fluxcd/source-controller/pull/1213) + [#1224](https://github.com/fluxcd/source-controller/pull/1224) + [#1230](https://github.com/fluxcd/source-controller/pull/1230) + [#1235](https://github.com/fluxcd/source-controller/pull/1235) +- docs: Add missing pem-encoding reference + [#1216](https://github.com/fluxcd/source-controller/pull/1216) +- build(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 + [#1227](https://github.com/fluxcd/source-controller/pull/1227) + +## 1.1.0 + +**Release date:** 2023-08-23 + +This minor release comes with API changes, bug fixes and several new features. 
+ +All APIs that accept TLS data have been modified to adopt Secrets of type +`kubernetes.io/tls`. This includes: +* HelmRepository: The field `.spec.secretRef` has been __deprecated__ in favor +of a new field [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/helmrepositories.md#cert-secret-reference). + This field is also supported by OCI HelmRepositories. +* OCIRepository: Support for the `caFile`, `keyFile` and `certFile` keys in the + Secret specified in [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/ocirepositories.md#cert-secret-reference) + have been __deprecated__ in favor of `ca.crt`, `tls.key` and `tls.crt`. + Also, the Secret now must be of type `Opaque` or `kubernetes.io/tls`. +* GitRepository: CA certificate can now be provided in the Secret specified in + `.spec.secretRef` using the `ca.crt` key, which takes precedence over the + existing `caFile` key. + +Furthermore, GitRepository has a couple of new features: +* Proxy support: A new field [`.spec.proxySecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#proxy-secret-reference) + has been introduced which can be used to specify the proxy configuration to + use for all remote Git operations related to the particular object. +* Tag verification: The field [`.spec.verification.mode`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#verification) + now supports the following values: + * HEAD: Verify the HEAD of the Git repository. + * Tag: Verify the tag specified in `.spec.ref` + * TagAndHead: Verify the tag specified in `.spec.ref` and the commit it + points to. + +Starting with this version, the controller now stops exporting an object's +metrics as soon as the object has been deleted.
+ +In addition, the controller now consumes significantly less CPU and memory when +reconciling Helm repository indexes. + +Lastly, a new flag `--interval-jitter-percentage` has been introduced which can +be used to specify a jitter to the reconciliation interval in order to +distribute the load more evenly when multiple objects are set up with the same +interval. + +Improvements: +- gitrepo: Add support for specifying proxy per `GitRepository` + [#1109](https://github.com/fluxcd/source-controller/pull/1109) +- helmrepo: add `.spec.certSecretRef` for specifying TLS auth data + [#1160](https://github.com/fluxcd/source-controller/pull/1160) +- Update docs on Azure identity + [#1167](https://github.com/fluxcd/source-controller/pull/1167) +- gitrepo: document limitation of `spec.ref.name` with Azure Devops + [#1175](https://github.com/fluxcd/source-controller/pull/1175) +- ocirepo: add cosign support for insecure HTTP registries + [#1176](https://github.com/fluxcd/source-controller/pull/1176) +- Handle delete before adding finalizer + [#1177](https://github.com/fluxcd/source-controller/pull/1177) +- Store Helm indexes in JSON format + [#1178](https://github.com/fluxcd/source-controller/pull/1178) +- Unpin go-git and update to v5.8.1 + [#1179](https://github.com/fluxcd/source-controller/pull/1179) +- controller: jitter requeue interval + [#1184](https://github.com/fluxcd/source-controller/pull/1184) +- cache: ensure new expiration is persisted + [#1185](https://github.com/fluxcd/source-controller/pull/1185) +- gitrepo: add support for Git tag verification + [#1187](https://github.com/fluxcd/source-controller/pull/1187) +- Update dependencies + [#1191](https://github.com/fluxcd/source-controller/pull/1191) +- Adopt Kubernetes style TLS Secrets + 
[#1194](https://github.com/fluxcd/source-controller/pull/1194) +- Update dependencies + [#1196](https://github.com/fluxcd/source-controller/pull/1196) +- Helm OCI: Add support for TLS registries with self-signed certs + [#1197](https://github.com/fluxcd/source-controller/pull/1197) +- Update dependencies + [#1202](https://github.com/fluxcd/source-controller/pull/1202) +- Preserve url encoded path in normalized helm repository URL + [#1203](https://github.com/fluxcd/source-controller/pull/1203) +- Fix link ref in API docs + [#1204](https://github.com/fluxcd/source-controller/pull/1204) + +Fixes: +- Fix the helm cache arguments + [#1170](https://github.com/fluxcd/source-controller/pull/1170) +- Delete stale metrics on object delete + [#1183](https://github.com/fluxcd/source-controller/pull/1183) +- Disable system-wide git config in tests + [#1192](https://github.com/fluxcd/source-controller/pull/1192) +- Fix links in API docs + [#1200](https://github.com/fluxcd/source-controller/pull/1200) + +## 1.0.1 + +**Release date:** 2023-07-10 + +This is a patch release that fixes the AWS authentication for cross-region ECR repositories. + +Fixes: +- Update `fluxcd/pkg/oci` to fix ECR cross-region auth + [#1158](https://github.com/fluxcd/source-controller/pull/1158) + +## 1.0.0 + +**Release date:** 2023-07-03 + +This is the first stable release of the controller. From now on, this controller +follows the [Flux 2 release cadence and support pledge](https://fluxcd.io/flux/releases/). + +Starting with this version, the build, release and provenance portions of the +Flux project supply chain [provisionally meet SLSA Build Level 3](https://fluxcd.io/flux/security/slsa-assessment/). 
+ +This release includes several minor changes that primarily focus on addressing +forgotten and obsolete bits in the logic related to GitRepository objects. + +Including a removal of the `OptimizedGitClones` feature flag. If your +Deployment is configured to disable this flag, you should remove it. + +In addition, dependencies have been updated to their latest version, including +an update of Kubernetes to v1.27.3. + +For a comprehensive list of changes since `v0.36.x`, please refer to the +changelog for [v1.0.0-rc.1](#100-rc1), [v1.0.0-rc.3](#100-rc3) and +[`v1.0.0-rc.4`](#100-rc4). + +Improvements: +- gitrepo: remove `OptimizedGitClones` as a feature gate + [#1124](https://github.com/fluxcd/source-controller/pull/1124) + [#1126](https://github.com/fluxcd/source-controller/pull/1126) +- Update dependencies + [#1127](https://github.com/fluxcd/source-controller/pull/1127) + [#1147](https://github.com/fluxcd/source-controller/pull/1147) +- Update Cosign to v2.1.0 + [#1132](https://github.com/fluxcd/source-controller/pull/1132) +- Align `go.mod` version with Kubernetes (Go 1.20) + [#1134](https://github.com/fluxcd/source-controller/pull/1134) +- Add the verification key to the GitRepository verified status condition + [#1136](https://github.com/fluxcd/source-controller/pull/1136) +- gitrepo: remove obsolete proxy docs + [#1144](https://github.com/fluxcd/source-controller/pull/1144) + +## 1.0.0-rc.5 + +**Release date:** 2023-06-01 + +This release candidate fixes a regression introduced in `1.0.0-rc.4` where +support for Git servers that exclusively use v2 of the wire protocol like Azure +Devops and AWS CodeCommit was broken. + +Lastly, the controller's dependencies were updated to mitigate CVE-2023-33199.
+ +Improvements: +- build(deps): bump github.com/sigstore/rekor from 1.1.1 to 1.2.0 + [#1107](https://github.com/fluxcd/source-controller/pull/1107) + +Fixes: +- Bump `fluxcd/pkg/git/gogit` to v0.12.0 + [#1111](https://github.com/fluxcd/source-controller/pull/1111) + +## 1.0.0-rc.4 + +**Release date:** 2023-05-26 + +This release candidate comes with support for Kubernetes v1.27 and Cosign v2. +It also enables the use of annotated Git tags with `.spec.ref.name` in +`GitRepository`. Furthermore, it fixes a bug related to accessing Helm OCI +charts on ACR using OIDC auth. + +Improvements: +- build(deps): bump helm/kind-action from 1.5.0 to 1.7.0 + [#1100](https://github.com/fluxcd/source-controller/pull/1100) +- build(deps): bump sigstore/cosign-installer from 3.0.3 to 3.0.5 + [#1101](https://github.com/fluxcd/source-controller/pull/1101) +- build(deps): bump actions/setup-go from 4.0.0 to 4.0.1 + [#1102](https://github.com/fluxcd/source-controller/pull/1102) +- Update cosign to v2 + [#1096](https://github.com/fluxcd/source-controller/pull/1096) +- build(deps): bump github.com/sigstore/rekor from 0.12.1-0.20220915152154-4bb6f441c1b2 to 1.1.1 + [#1083](https://github.com/fluxcd/source-controller/pull/1083) +- Update controller-runtime and Kubernetes dependencies + [#1104](https://github.com/fluxcd/source-controller/pull/1104) +- Update dependencies; switch to `go-git/go-git` and `pkg/tar` + [#1105](https://github.com/fluxcd/source-controller/pull/1105) + +## 1.0.0-rc.3 + +**Release date:** 2023-05-12 + +This release candidate introduces the verification of the Artifact digest in +storage during reconciliation. This ensures that the Artifact is not tampered +with after it was written to storage. 
When the digest does not match, the +controller will emit a warning event and remove the file from storage, forcing +the Artifact to be re-downloaded. + +In addition, files with executable permissions are now archived with their mode +set to `0o744` instead of `0o644`. Allowing the extracted file to be executable +by the user. + +Lastly, the controller's dependencies were updated to mitigate CVE-2023-1732 +and CVE-2023-2253, and the controller base image was updated to Alpine 3.18. + +Improvements: +- Verify digest of Artifact in Storage + [#1088](https://github.com/fluxcd/source-controller/pull/1088) +- build(deps): bump github.com/cloudflare/circl from 1.3.2 to 1.3.3 + [#1092](https://github.com/fluxcd/source-controller/pull/1092) +- build(deps): bump github.com/docker/distribution from 2.8.1+incompatible to 2.8.2+incompatible + [#1093](https://github.com/fluxcd/source-controller/pull/1093) +- storage: set `0o744` for files with exec mode set + [#1094](https://github.com/fluxcd/source-controller/pull/1094) + +## 1.0.0-rc.2 + +**Release date:** 2023-05-09 + +This release candidate comes with various updates to the controller's dependencies, +most notably, Helm was updated to v3.11.3. + +Improvements: +- Update dependencies + [#1086](https://github.com/fluxcd/source-controller/pull/1086) +- Set RecoverPanic globally across controllers + [#1077](https://github.com/fluxcd/source-controller/pull/1077) +- Move controllers to internal/controller + [#1076](https://github.com/fluxcd/source-controller/pull/1076) + +## 1.0.0-rc.1 + +**Release date:** 2023-03-30 + +This release candidate promotes the `GitRepository` API from `v1beta2` to `v1`. +The controller now supports horizontal scaling using +sharding based on a label selector.
+ +In addition, support for Azure Workload Identity was added to +`OCIRepositories`, `Buckets` and `HelmRepositories` when using `provider: azure`. + +### Highlights + +#### API changes + +The `GitRepository` kind was promoted from v1beta2 to v1 (GA) and deprecated fields were removed. + +The common types `Artifact`, `Conditions` and the `Source` interface were promoted to v1. + +The `gitrepositories.source.toolkit.fluxcd.io` CRD contains the following versions: +- v1 (storage version) +- v1beta2 (deprecated) +- v1beta1 (deprecated) + +#### Upgrade procedure + +The `GitRepository` v1 API is backwards compatible with v1beta2, except for the following: +- the deprecated field `.spec.gitImplementation` was removed +- the unused field `.spec.accessFrom` was removed +- the deprecated field `.status.contentConfigChecksum` was removed +- the deprecated field `.status.artifact.checksum` was removed +- the `.status.url` was removed in favor of the absolute `.status.artifact.url` + +To upgrade from v1beta2, after deploying the new CRD and controller, +set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that +contain `GitRepository` definitions and remove the deprecated fields if any. +Bumping the API version in manifests can be done gradually. +It is advised to not delay this procedure as the beta versions will be removed after 6 months. + +#### Sharding + +Starting with this release, the controller can be configured with +`--watch-label-selector`, after which only objects with this label will +be reconciled by the controller. + +This allows for horizontal scaling, where source-controller +can be deployed multiple times with a unique label selector +which is used as the sharding key. + +Note that this also requires configuration of the `--storage-adv-addr` +to a unique address (in combination with a proper Service definition). +This to ensure the Artifacts handled by the sharding controller point +to a unique endpoint. 
+ +In addition, Source object kinds which have a dependency on another +kind (i.e. a HelmChart on a HelmRepository) need to have the same +labels applied to work as expected. + +### Full changelog + +Improvements: +- GA: Promote `GitRepository` API to `source.toolkit.fluxcd.io/v1` + [#1056](https://github.com/fluxcd/source-controller/pull/1056) +- Add reconciler sharding capability based on label selector + [#1059](https://github.com/fluxcd/source-controller/pull/1059) +- Support Azure Workload Identity + [#1048](https://github.com/fluxcd/source-controller/pull/1048) +- Update dependencies + [#1062](https://github.com/fluxcd/source-controller/pull/1062) +- Update workflows + [#1054](https://github.com/fluxcd/source-controller/pull/1054) + +## 0.36.1 + +**Release date:** 2023-03-20 + +This release fixes a bug where after reading a `.sourceignore` file in a +subdirectory, the controller could start to ignore files from directories next +to the directory the `.sourceignore` file was placed in. + +Fixes: +- Update sourceignore to fix pattern domain bug + [#1050](https://github.com/fluxcd/source-controller/pull/1050) + +## 0.36.0 + +**Release date:** 2023-03-08 + +This release changes the format of the Artifact `Revision` field when using a +GitRepository with a `.spec.ref.name` set (introduced in [`v0.35.0`](#0350)), +changing it from `sha1:<checksum>` to `<name>@sha1:<checksum>`. Offering a more +precise reflection of the revision the Artifact was created from. + +In addition, `klog` is now configured to log using the same logger as the rest +of the controller (providing a consistent log format). + +Lastly, the controller is now built using Go `1.20`, and the dependencies have +been updated to their latest versions.
+ +Improvements: +- Advertise absolute reference in Artifact for GitRepository name ref + [#1036](https://github.com/fluxcd/source-controller/pull/1036) +- Update Go to 1.20 + [#1040](https://github.com/fluxcd/source-controller/pull/1040) +- Update dependencies + [#1040](https://github.com/fluxcd/source-controller/pull/1040) + [#1041](https://github.com/fluxcd/source-controller/pull/1041) + [#1043](https://github.com/fluxcd/source-controller/pull/1043) +- Use `logger.SetLogger` to also configure `klog` + [#1044](https://github.com/fluxcd/source-controller/pull/1044) + +## 0.35.2 + +**Release date:** 2023-02-23 + +This release reduces the amount of memory consumed by the controller when +reconciling HelmRepositories, by using only the digest of the YAML file as the +Revision of the Artifact instead of the stable sorted version of the entire +index. This aligns with the behavior before `v0.35.0`, and is therefore +considered a bug fix. + +In addition, the dependencies have been updated to include some minor security +patches. + +Note that `v0.35.0` contains breaking changes. Please refer to the [changelog +entry](#0350) for more information. + +Fixes: +- helm: only use Digest to calculate index revision + [#1035](https://github.com/fluxcd/source-controller/pull/1035) + +Improvements: +- Update dependencies + [#1036](https://github.com/fluxcd/source-controller/pull/1036) + +## 0.35.1 + +**Release date:** 2023-02-17 + +This release addresses a hypothetical issue with the Artifact `Digest` field +validation, where a patch of the Artifact could fail to be applied to an object +due to the lack of an `omitempty` tag on the optional field. In reality, this +issue is not possible to encounter, as the `Digest` field is always set when +the Artifact is created. + +Note that `v0.35.0` contains breaking changes.
Please refer to the [changelog +entry](#0350) for more information. + +Fixes: +- api: omit empty Digest in Artifact + [#1031](https://github.com/fluxcd/source-controller/pull/1031) + +## 0.35.0 + +**Release date:** 2023-02-16 + +This release introduces a new format for the Artifact `Revision`, and deprecates +the `Checksum` field in favor of a new `Digest` field. In addition, it adds +support for Git reference names in a GitRepository, and comes with the usual +collection of dependency updates. + +### Highlights + +#### Support for Git reference names + +Starting with this version, it is possible to define a [Git Reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References) +in a GitRepository using `.spec.ref.name`. + +This opens the door to a range of functionalities not available before, as it +for example allows the controller to follow pull (`refs/pull/<id>/head`) or +merge (`refs/merge-requests/<id>/head`) requests, and allows a transition from +the HEAD of a branch (`refs/heads/main`) to a tag (`refs/tags/v0.1.0`) by +changing a single field value. + +Refer to the [GitRepository specification](https://github.com/fluxcd/source-controller/blob/v0.35.0/docs/spec/v1beta2/gitrepositories.md#name-example) +for more details. + +#### Introduction of Artifact Digest + +The Artifact of a Source will now advertise a `Digest` field containing the +checksum of the file advertised in the `Path`, and the alias of the algorithm +used to calculate it. Creating a "digest" in the format of `<algorithm>:<checksum>`. + +The algorithm is configurable using the newly introduced `--artifact-digest-algo` +flag, which allows configuration of other algorithms (`sha384`, `sha512`, and +`blake3`) than the hardcoded `sha256` default of the [now deprecated `Checksum` +field](#deprecation-of-artifact-checksum). + +Please note that until the `Checksum` is fully deprecated, changing the +algorithm is not yet advised (albeit supported), as this will result in a +double computation.
+ +### :warning: Breaking changes + +#### Artifact Revision format + +The `Revision` format for an Artifact consisting of a named pointer (a Git +branch or tag) and/or a specific revision (a Git commit SHA or other calculated +checksum) has changed to contain an `@` separator opposed to `/`, and includes +the algorithm alias as a prefix to a checksum (creating a "digest"). +In addition, `HEAD` is no longer used as a named pointer for exact commit +references, but will now only advertise the commit itself. + +For example: + +- `main/1eabc9a41ca088515cab83f1cce49eb43e84b67f` => `main@sha1:1eabc9a41ca088515cab83f1cce49eb43e84b67f` +- `HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738` => `sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738` +- `tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc` => `tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc` +- `8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c` => `sha256:8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c` + +When the storage of the controller is backed by a Persistent Volume, the +rollout of this new format happens for the next new revision the controller +encounters. Otherwise, the new revision will be advertised as soon as the +Artifact has been reproduced after the controller is deployed. + +Other Flux controllers making use of an Artifact are aware of the change in +format, and work with it in a backwards compatible manner. Avoiding observing +a change of revision when this is actually just a change of format. If you +programmatically make use of the Revision, please refer to [the +`TransformLegacyRevision` helper](https://github.com/fluxcd/source-controller/blob/api/v0.35.0/api/v1beta2/artifact_types.go#L121) +to allow a transition period in your application. 
+ +For more information around this change, refer to +[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#establish-an-artifact-revision-format). + +#### Deprecation of Artifact Checksum + +The `Checksum` field of an Artifact has been deprecated in favor of the newly +introduced `Digest`. Until the deprecated field is removed in the next version +of the API, the controller will continue to produce the SHA-256 checksum in +addition to the digest. Changing the algorithm used to produce the digest using +`--artifact-digest-algo` is therefore not yet advised (albeit supported), as +this will result in a double computation. + +For more information around this change, refer to +[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#introduce-a-digest-field). + +### Full changelog + +Improvements: +- Introduction of Digest and change of Revision format + [#1001](https://github.com/fluxcd/source-controller/pull/1001) +- Improve HelmRepository type switching from default to oci + [#1016](https://github.com/fluxcd/source-controller/pull/1016) +- Apply default permission mode to all files/dirs in an artifact archive + [#1020](https://github.com/fluxcd/source-controller/pull/1020) +- Add support for checking out Git references + [#1026](https://github.com/fluxcd/source-controller/pull/1026) +- Update dependencies + [#1025](https://github.com/fluxcd/source-controller/pull/1025) + [#1028](https://github.com/fluxcd/source-controller/pull/1028) + [#1030](https://github.com/fluxcd/source-controller/pull/1030) + +Fixes: +- Normalize Helm repository URL with query params properly + [#1015](https://github.com/fluxcd/source-controller/pull/1015) +- Prevent panic when cloning empty Git repository + [#1021](https://github.com/fluxcd/source-controller/pull/1021) 
+ +## 0.34.0 + +**Release date:** 2023-01-31 + +This prerelease comes with support for HTTPS bearer token authentication for Git +repository. The GitRepository authentication Secret is expected to contain the +bearer token in `.data.bearerToken`. + +The caching of Secret and ConfigMap resources is disabled by +default to improve memory usage. To opt-out from this behavior, start the +controller with: `--feature-gates=CacheSecretsAndConfigMaps=true`. + +All the Source kinds now support progressive status updates. The progress made +by the controller during reconciliation of a Source is reported immediately in +the status of the Source object. + +In addition, the controller dependencies have been updated to Kubernetes v1.26. + +:warning: **Breaking change:** When using SSH authentication in GitRepository, +if the referenced Secret contained `.data.username`, it was used as the SSH +user. With this version, SSH user will be the username in the SSH address. For +example, if the Git repository address is `ssh://flux@example.com`, `flux` will +be used as the SSH user during SSH authentication. When no username is +specified, `git` remains the default SSH user. 
+ +Improvements: +- Garbage collection lock file ignore tests + [#992](https://github.com/fluxcd/source-controller/pull/992) +- purge minio test container at the end of tests + [#993](https://github.com/fluxcd/source-controller/pull/993) +- Introduce Progressive status + [#974](https://github.com/fluxcd/source-controller/pull/974) +- build(deps): bump github.com/containerd/containerd from 1.6.10 to 1.6.12 + [#997](https://github.com/fluxcd/source-controller/pull/997) +- fix typo in helmRepo secretRef spec CRD + [#996](https://github.com/fluxcd/source-controller/pull/996) +- Fix OCIRepository testdata permissions + [#998](https://github.com/fluxcd/source-controller/pull/998) +- Set rate limiter option in test reconcilers + [#999](https://github.com/fluxcd/source-controller/pull/999) +- Update git dependencies for bearer token support + [#1003](https://github.com/fluxcd/source-controller/pull/1003) +- Document support for bearer token authentication over https in gitrepositories + [#1000](https://github.com/fluxcd/source-controller/pull/1000) +- Disable caching of secrets and configmaps + [#989](https://github.com/fluxcd/source-controller/pull/989) +- Update dependencies + [#1008](https://github.com/fluxcd/source-controller/pull/1008) +- build: Enable SBOM and SLSA Provenance + [#1009](https://github.com/fluxcd/source-controller/pull/1009) +- Add note about sourceignore recursion + [#1007](https://github.com/fluxcd/source-controller/pull/1007) +- CI: Replace engineerd/setup-kind with helm/kind-action + [#1010](https://github.com/fluxcd/source-controller/pull/1010) +- helm/oci: Add context to chart download failure + [#1013](https://github.com/fluxcd/source-controller/pull/1013) + +## 0.33.0 + +**Release date:** 2022-12-20 + +This prerelease comes 
with a dedicated mux for the controller's fileserver.
deprecation test cleanup + [#980](https://github.com/fluxcd/source-controller/pull/980) +- minio: use container image for tests + [#981](https://github.com/fluxcd/source-controller/pull/981) +- helm: Update SDK to v3.10.3 + [#982](https://github.com/fluxcd/source-controller/pull/982) +- Update fluxcd/pkg/oci dependency + [#983](https://github.com/fluxcd/source-controller/pull/983) +- Update dependencies + [#985](https://github.com/fluxcd/source-controller/pull/985) + +## 0.32.1 + +**Release date:** 2022-11-18 + +This prerelease rectifies the `v0.32.0` release by retracting the previous Go +version, bumping the controller api version and the controller deployment. + +## 0.32.0 + +**Release date:** 2022-11-17 + +This prerelease comes with a major refactoring of the controller's Git operations. +The `go-git` implementation now supports all Git servers, including +Azure DevOps, which previously was only supported by `libgit2`. + +This version initiates the soft deprecation of the `libgit2` implementation. +The motivation for removing support for `libgit2` being: +- Reliability: over the past months we managed to substantially reduce the +issues users experienced, but there are still crashes happening when the controller +runs over longer periods of time, or when under intense GC pressure. +- Performance: due to the inherit nature of `libgit2` implementation, which +is a C library called via CGO through `git2go`, it will never perform as well as +a pure Go implementations. At scale, memory pressure insues which then triggers +the reliability issues above. +- Lack of Shallow Clone Support. +- Maintainability: supporting two Git implementations is a big task, even more +so when one of them is in a complete different tech stack. Given its nature, to +support `libgit2`, we have to maintain an additional repository. 
Statically built +`libgit2` libraries need to be cross-compiled for all our supported platforms. +And a lot of "unnecessary" code has to be in place to make building, testing and +fuzzing work seamlessly. + +As a result the field `spec.gitImplementation` is ignored and the +reconciliations will use `go-git`. To opt-out from this behaviour, start +the controller with: `--feature-gates=ForceGoGitImplementation=false`. + +Users having any issues with `go-git` should report it to the Flux team, +so any issues can be resolved before support for `libgit2` is completely +removed from the codebase. + +Improvements: +- Refactor Git operations and introduce go-git support for Azure DevOps and AWS CodeCommit + [#944](https://github.com/fluxcd/source-controller/pull/944) +- Use Flux Event API v1beta1 + [#952](https://github.com/fluxcd/source-controller/pull/952) +- gogit: Add new ForceGoGitImplementation FeatureGate + [#945](https://github.com/fluxcd/source-controller/pull/945) +- Remove nsswitch.conf creation from Dockerfile + [#958](https://github.com/fluxcd/source-controller/pull/958) +- Update dependencies + [#960](https://github.com/fluxcd/source-controller/pull/960) + [#950](https://github.com/fluxcd/source-controller/pull/950) + [#959](https://github.com/fluxcd/source-controller/pull/959) +- Upgrade to azure-sdk-for-go/storage/azblob v0.5.1 + [#931](https://github.com/fluxcd/source-controller/pull/931) + +## 0.31.0 + +**Release date:** 2022-10-21 + +This prerelease comes with support for Cosign verification of Helm charts. +The signatures verification can be configured by setting `HelmChart.spec.verify` with +`provider` as `cosign` and a `secretRef` to a secret containing the public key. 
+Cosign keyless verification is also supported, please see the +[HelmChart API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.31.0/docs/spec/v1beta2/helmcharts.md#verification) +for more details. + +In addition, the controller dependencies have been updated +to Kubernetes v1.25.3 and Helm v3.10.1. + +Improvements: +- Implement Cosign verification for HelmCharts + [#925](https://github.com/fluxcd/source-controller/pull/925) +- Update dependencies + [#942](https://github.com/fluxcd/source-controller/pull/942) + +Fixes: +- Allow deleting suspended objects + [#937](https://github.com/fluxcd/source-controller/pull/937) + +## 0.30.1 + +**Release date:** 2022-10-10 + +This prerelease enables the use of container-level SAS tokens when using `Bucket` objects +to access Azure Storage. The Azure SDK error message has also been enriched to hint Flux +users the potential reasons in case of failure. + +Improvements: +- List objects when checking if bucket exists to allow use of container-level SAS token + [#906](https://github.com/fluxcd/source-controller/pull/906) + +## 0.30.0 + +**Release date:** 2022-09-29 + +This prerelease adds support for Cosign verification in `OCIRepository` source. +The signatures verification can be configured by setting `OCIRepository.spec.verify` with +`provider` as `cosign` and a `secretRef` to a secret containing the public key. +Cosign keyless verification is also supported, please see the +[OCIRepository API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.30.0/docs/spec/v1beta2/ocirepositories.md#verification) +for more details. + +It also comes with strict validation rules for API fields which define a +(time) duration. Effectively, this means values without a time unit (e.g. `ms`, +`s`, `m`, `h`) will now be rejected by the API server. 
To stimulate sane +configurations, the units `ns`, `us` and `µs` can no longer be configured, nor +can `h` be set for fields defining a timeout value. + +In addition, the controller dependencies have been updated +to Kubernetes controller-runtime v0.13. + +:warning: **Breaking changes:** +- `.spec.interval` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"` +- `.spec.timeout` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"` + +Improvements: +- api: add custom validation for v1.Duration types + [#903](https://github.com/fluxcd/source-controller/pull/903) +- [RFC-0003] Implement OCIRepository verification using Cosign + [#876](https://github.com/fluxcd/source-controller/pull/876) +- Consider bipolarity conditions in Ready condition summarization + [#907](https://github.com/fluxcd/source-controller/pull/907) +- Update Bucket related SDK dependencies + [#911](https://github.com/fluxcd/source-controller/pull/911) +- Add custom CA certificates to system certificates + [#904](https://github.com/fluxcd/source-controller/pull/904) +- [OCIRepository] Optimise OCI artifacts reconciliation + [#913](https://github.com/fluxcd/source-controller/pull/913) +- Update dependencies + [#919](https://github.com/fluxcd/source-controller/pull/919) +- Build with Go 1.19 + [#920](https://github.com/fluxcd/source-controller/pull/920) +- Bump libgit2 image and disable cosign verification for CI + [#921](https://github.com/fluxcd/source-controller/pull/921) +- OCIRepositoryReconciler no-op improvements + [#917](https://github.com/fluxcd/source-controller/pull/917) +- Accept a slice of remote.Option for cosign verification + [#916](https://github.com/fluxcd/source-controller/pull/916) +- Update pkg/oci to v0.11.0 + [#922](https://github.com/fluxcd/source-controller/pull/922) + +Fixes: +- Handle nil OCI 
authenticator with malformed registry + [#897](https://github.com/fluxcd/source-controller/pull/897) + +## 0.29.0 + +**Release date:** 2022-09-09 + +This prerelease adds support for non-TLS container registries such +as [Kubernetes Kind Docker Registry](https://kind.sigs.k8s.io/docs/user/local-registry/). +Connecting to an in-cluster registry over plain HTTP, +requires setting the `OCIRepository.spec.insecure` field to `true`. + +:warning: **Breaking change:** The controller logs have been aligned +with the Kubernetes structured logging. For more details on the new logging +structure please see: [fluxcd/flux2#3051](https://github.com/fluxcd/flux2/issues/3051). + +Improvements: +- Align controller logs to Kubernetes structured logging + [#882](https://github.com/fluxcd/source-controller/pull/882) +- [OCIRepository] Add support for non-TLS insecure container registries + [#881](https://github.com/fluxcd/source-controller/pull/881) +- Fuzz optimisations + [#886](https://github.com/fluxcd/source-controller/pull/886) + +Fixes: +- [OCI] Static credentials should take precedence over the OIDC provider + [#884](https://github.com/fluxcd/source-controller/pull/884) + +## 0.28.0 + +**Release date:** 2022-08-29 + +This prerelease adds support for contextual login to container registries when pulling +Helm charts from Azure Container Registry, Amazon Elastic Container Registry +and Google Artifact Registry. Contextual login for `HelmRepository` +objects can be enabled by setting the `spec.provider` field to `azure`, `aws` or `gcp`. + +Selecting the OCI layer containing Kubernetes manifests is now possible +when defining `OCIRepository` objects by setting the `spec.layerSelector.mediaType` field. + +In addition, the controller dependencies have been updated to Kubernetes v1.25.0 and Helm v3.9.4. 
+ +Improvements: +- [HelmRepository] Enable contextual login for OCI + [#873](https://github.com/fluxcd/source-controller/pull/873) +- [OCIRepository] Select layer by media type + [#871](https://github.com/fluxcd/source-controller/pull/871) +- Update Kubernetes packages to v1.25.0 + [#875](https://github.com/fluxcd/source-controller/pull/875) +- Update dependencies + [#869](https://github.com/fluxcd/source-controller/pull/869) +- Ensure Go 1.18 for fuzz image + [#872](https://github.com/fluxcd/source-controller/pull/872) + +## 0.27.0 + +**Release date:** 2022-08-17 + +This prerelease adds support for SAS Keys when authenticating against Azure Blob Storage +and improves the documentation for `OCIRepository`. + +The package `sourceignore`, which is used for excluding files from Flux internal artifacts, +has been moved to `fluxcd/pkg/sourceignore`. + +Improvements: +- OCIRepo docs: auto-login setup details + [#862](https://github.com/fluxcd/source-controller/pull/862) +- Add Support for SAS keys in Azure Blob + [#738](https://github.com/fluxcd/source-controller/pull/738) +- Use sourceignore from fluxcd/pkg/sourceignore + [#864](https://github.com/fluxcd/source-controller/pull/864) +- Update dependencies + [#869](https://github.com/fluxcd/source-controller/pull/869) + ## 0.26.1 **Release date:** 2022-08-11 @@ -170,7 +1574,7 @@ Improvements: This prerelease fixes a regression for SSH host key verification and fixes semver sorting for Helm OCI charts. -In addition, the controller dependencies where update to Kubernetes v1.24.1. +In addition, the controller dependencies have been updated to Kubernetes v1.24.1. 
Fixes: - helm: Fix sorting semver from OCI repository tags @@ -1816,7 +3220,7 @@ using the [notification.fluxcd.io API](https://github.com/fluxcd/notification-co **Release date:** 2020-06-24 This is the first prerelease ready for public testing. To get started -testing, see the [GitOps Toolkit guide](https://fluxcd.io/docs/get-started/). +testing, see the [GitOps Toolkit guide](https://fluxcd.io/flux/get-started/). ## 0.0.1-beta.2 diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 18b5829c9..11d05ad83 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -1,6 +1,6 @@ # Development -> **Note:** Please take a look at +> **Note:** Please take a look at > to find out about how to contribute to Flux and how to interact with the > Flux Development team. @@ -13,30 +13,9 @@ There are a number of dependencies required to be able to run the controller and - [Install Docker](https://docs.docker.com/engine/install/) - (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) -The [libgit2](https://libgit2.org/) dependency is now automatically managed by the Makefile logic. -However, it depends on [pkg-config](https://freedesktop.org/wiki/Software/pkg-config/) being installed: - -### macOS - -```console -$ # Ensure pkg-config is installed -$ brew install pkg-config -``` - -### Linux - -```console -$ # Ensure pkg-config is installed -$ pacman -S pkgconf -``` - -**Note:** Example shown is for Arch Linux, but likewise procedure can be -followed using any other package manager. Some distributions may have slight -variation of package names (e.g. `apt install -y pkg-config`). 
- In addition to the above, the following dependencies are also used by some of the `make` targets: -- `controller-gen` (v0.7.0) +- `controller-gen` (v0.19.0) - `gen-crd-api-reference-docs` (v0.3.0) - `setup-envtest` (latest) @@ -45,7 +24,7 @@ If any of the above dependencies are not present on your system, the first invoc ## How to run the test suite Prerequisites: -* Go >= 1.18 +* Go >= 1.25 You can run the test suite by simply doing @@ -79,7 +58,7 @@ make run ### Building the container image -Set the name of the container image to be created from the source code. This will be used +Set the name of the container image to be created from the source code. This will be used when building, pushing and referring to the image on YAML files: ```sh @@ -100,7 +79,7 @@ make docker-push ``` Alternatively, the three steps above can be done in a single line: - + ```sh IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \ make docker-build @@ -149,18 +128,12 @@ Create a `.vscode/launch.json` file: "type": "go", "request": "launch", "mode": "auto", - "envFile": "${workspaceFolder}/build/.env", - "program": "${workspaceFolder}/main.go" + "program": "${workspaceFolder}/main.go", + "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"] } ] } ``` -Create the environment file containing details on how to load -`libgit2` dependencies: -```bash -make env -``` - Start debugging by either clicking `Run` > `Start Debugging` or using the relevant shortcut. 
diff --git a/Dockerfile b/Dockerfile index 0c5f645d7..0f7c6f849 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,28 +1,15 @@ -ARG BASE_VARIANT=alpine -ARG GO_VERSION=1.18 -ARG XX_VERSION=1.1.2 - -ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2-only -ARG LIBGIT2_TAG=v0.2.0 - -FROM ${LIBGIT2_IMG}:${LIBGIT2_TAG} AS libgit2-libs +ARG GO_VERSION=1.25 +ARG XX_VERSION=1.6.1 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} as gostable - -FROM gostable AS go-linux - -# Build-base consists of build platform dependencies and xx. -# These will be used at current arch to yield execute the cross compilations. -FROM go-${TARGETOS} AS build-base - -RUN apk add --no-cache clang lld pkgconfig +# Docker buildkit multi-arch build requires golang alpine +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder +# Copy the build utilities. COPY --from=xx / / -# build-go-mod can still be cached at build platform architecture. -FROM build-base as build-go-mod +ARG TARGETPLATFORM # Configure workspace WORKDIR /workspace @@ -37,56 +24,24 @@ COPY go.sum go.sum # Cache modules RUN go mod download - -# Build stage install per target platform -# dependency and effectively cross compile the application. -FROM build-go-mod as build - -ARG TARGETPLATFORM - -COPY --from=libgit2-libs /usr/local/ /usr/local/ - -# Some dependencies have to installed -# for the target platform: https://github.com/tonistiigi/xx#go--cgo -RUN xx-apk add musl-dev gcc clang lld - -WORKDIR /workspace - # Copy source code COPY main.go main.go -COPY controllers/ controllers/ -COPY pkg/ pkg/ COPY internal/ internal/ ARG TARGETPLATFORM ARG TARGETARCH -ENV CGO_ENABLED=1 - -# Instead of using xx-go, (cross) compile with vanilla go leveraging musl tool chain. 
-RUN export PKG_CONFIG_PATH="/usr/local/$(xx-info triple)/lib/pkgconfig" && \ - export CGO_LDFLAGS="$(pkg-config --static --libs --cflags libgit2) -static -fuse-ld=lld" && \ - xx-go build \ - -ldflags "-s -w" \ - -tags 'netgo,osusergo,static_build' \ - -o /source-controller -trimpath main.go; -# Ensure that the binary was cross-compiled correctly to the target platform. -RUN xx-verify --static /source-controller +# build without specifing the arch +ENV CGO_ENABLED=0 +RUN xx-go build -trimpath -a -o source-controller main.go - -FROM alpine:3.16 +FROM alpine:3.22 ARG TARGETPLATFORM RUN apk --no-cache add ca-certificates \ && update-ca-certificates -# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries. -# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460 -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf - -# Copy over binary from build -COPY --from=build /source-controller /usr/local/bin/ -COPY ATTRIBUTIONS.md / +COPY --from=builder /workspace/source-controller /usr/local/bin/ USER 65534:65534 ENTRYPOINT [ "source-controller" ] diff --git a/MAINTAINERS b/MAINTAINERS index 7b896b063..3a1bb4156 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7,6 +7,4 @@ from the main Flux v2 git repository, as listed in https://github.com/fluxcd/flux2/blob/main/MAINTAINERS -In alphabetical order: - -Paulo Gomes, Weaveworks (github: @pjbgf, slack: pjbgf) +Dipti Pai, Microsoft (github: @dipti-pai, slack: Dipti Pai) diff --git a/Makefile b/Makefile index 47b44a0a4..28226af5d 100644 --- a/Makefile +++ b/Makefile @@ -2,16 +2,15 @@ IMG ?= fluxcd/source-controller TAG ?= latest -# Base image used to build the Go binary -LIBGIT2_IMG ?= ghcr.io/fluxcd/golang-with-libgit2-only -LIBGIT2_TAG ?= v0.2.0 - # Allows for defining additional Go test args, e.g. '-tags integration'. 
GO_TEST_ARGS ?= -race # Allows for filtering tests based on the specified prefix GO_TEST_PREFIX ?= +# Defines whether cosign verification should be skipped. +SKIP_COSIGN_VERIFICATION ?= false + # Allows for defining additional Docker buildx arguments, # e.g. '--push'. BUILD_ARGS ?= @@ -30,21 +29,17 @@ REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel) BUILD_DIR := $(REPOSITORY_ROOT)/build # Other dependency versions -ENVTEST_BIN_VERSION ?= 1.19.2 +ENVTEST_BIN_VERSION ?= 1.24.0 -# Caches libgit2 versions per tag, "forcing" rebuild only when needed. -LIBGIT2_PATH := $(BUILD_DIR)/libgit2/$(LIBGIT2_TAG) -LIBGIT2_LIB_PATH := $(LIBGIT2_PATH)/lib -LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.a +# FUZZ_TIME defines the max amount of time, in Go Duration, +# each fuzzer should run for. +FUZZ_TIME ?= 1m -export CGO_ENABLED=1 -export PKG_CONFIG_PATH=$(LIBGIT2_LIB_PATH)/pkgconfig -export CGO_LDFLAGS=$(shell PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) pkg-config --libs --static --cflags libgit2 2>/dev/null) GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))' # API (doc) generation utilities -CONTROLLER_GEN_VERSION ?= v0.7.0 -GEN_API_REF_DOCS_VERSION ?= v0.3.0 +CONTROLLER_GEN_VERSION ?= v0.19.0 +GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113 # If gobin not set, create one on ./build and add to path. 
ifeq (,$(shell go env GOBIN)) @@ -66,40 +61,38 @@ ifeq ($(shell uname -s),Darwin) ENVTEST_ARCH=amd64 endif -all: build +all: manager -build: check-deps $(LIBGIT2) ## Build manager binary +# Build manager binary +manager: generate fmt vet go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)" -test: $(LIBGIT2) install-envtest test-api check-deps ## Run all tests +test: install-envtest test-api ## Run all tests HTTPS_PROXY="" HTTP_PROXY="" \ KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ GIT_CONFIG_GLOBAL=/dev/null \ + GIT_CONFIG_NOSYSTEM=true \ go test $(GO_STATIC_FLAGS) \ ./... \ $(GO_TEST_ARGS) \ -coverprofile cover.out -test-ctrl: $(LIBGIT2) install-envtest test-api check-deps ## Run controller tests +test-ctrl: install-envtest test-api ## Run controller tests HTTPS_PROXY="" HTTP_PROXY="" \ KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ GIT_CONFIG_GLOBAL=/dev/null \ go test $(GO_STATIC_FLAGS) \ -run "^$(GO_TEST_PREFIX).*" \ - -v ./controllers \ + -v ./internal/controller \ -coverprofile cover.out -check-deps: -ifeq ($(shell uname -s),Darwin) - if ! command -v pkg-config &> /dev/null; then echo "pkg-config is required"; exit 1; fi -endif - test-api: ## Run api tests cd api; go test $(GO_TEST_ARGS) ./... -coverprofile cover.out -run: $(LIBGIT2) generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config - go run $(GO_STATIC_FLAGS) ./main.go +run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config + @mkdir -p $(PWD)/bin/data + go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data install: manifests ## Install CRDs into a cluster kustomize build config/crd | kubectl apply -f - @@ -122,18 +115,17 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. 
cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases" api-docs: gen-crd-api-reference-docs ## Generate API reference documentation - $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md + $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md tidy: ## Run go mod tidy - cd api; rm -f go.sum; go mod tidy -compat=1.18 - rm -f go.sum; go mod tidy -compat=1.18 + cd api; rm -f go.sum; go mod tidy -compat=1.25 + rm -f go.sum; go mod tidy -compat=1.25 fmt: ## Run go fmt against code go fmt ./... cd api; go fmt ./... - cd tests/fuzz; go fmt . -vet: $(LIBGIT2) ## Run go vet against code +vet: ## Run go vet against code go vet ./... cd api; go vet ./... @@ -142,8 +134,6 @@ generate: controller-gen ## Generate API code docker-build: ## Build the Docker image docker buildx build \ - --build-arg LIBGIT2_IMG=$(LIBGIT2_IMG) \ - --build-arg LIBGIT2_TAG=$(LIBGIT2_TAG) \ --platform=$(BUILD_PLATFORMS) \ -t $(IMG):$(TAG) \ $(BUILD_ARGS) . @@ -155,13 +145,13 @@ docker-push: ## Push Docker image CONTROLLER_GEN = $(GOBIN)/controller-gen .PHONY: controller-gen controller-gen: ## Download controller-gen locally if necessary. 
- $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION)) # Find or download gen-crd-api-reference-docs GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs .PHONY: gen-crd-api-reference-docs gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary - $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@3f29e6853552dcf08a8e846b1225f275ed0f3e3b) + $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION)) ENVTEST = $(GOBIN)/setup-envtest .PHONY: envtest @@ -176,40 +166,14 @@ install-envtest: setup-envtest ## Download envtest binaries locally. # setup-envtest sets anything below k8s to 0555 chmod -R u+w $(BUILD_DIR)/testbin -libgit2: $(LIBGIT2) ## Detect or download libgit2 library - -COSIGN = $(GOBIN)/cosign -$(LIBGIT2): - $(call go-install-tool,$(COSIGN),github.com/sigstore/cosign/cmd/cosign@latest) - - IMG=$(LIBGIT2_IMG) TAG=$(LIBGIT2_TAG) PATH=$(PATH):$(GOBIN) ./hack/install-libraries.sh - - .PHONY: help help: ## Display this help menu @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -update-attributions: - ./hack/update-attributions.sh - e2e: ./hack/ci/e2e.sh -verify: update-attributions fmt vet manifests api-docs -ifneq ($(shell grep -o 'LIBGIT2_IMG ?= \w.*' Makefile | cut -d ' ' -f 3):$(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), \ - $(shell grep -o "LIBGIT2_IMG=\w.*" Dockerfile | cut -d'=' -f2):$(shell grep -o "LIBGIT2_TAG=\w.*" Dockerfile | cut -d'=' -f2)) - @{ \ - echo "LIBGIT2_IMG and LIBGIT2_TAG must match in both 
Makefile and Dockerfile"; \ - exit 1; \ - } -endif -ifneq ($(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), $(shell grep -o "LIBGIT2_TAG=.*" tests/fuzz/oss_fuzz_build.sh | sed 's;LIBGIT2_TAG="$${LIBGIT2_TAG:-;;g' | sed 's;}";;g')) - @{ \ - echo "LIBGIT2_TAG must match in both Makefile and tests/fuzz/oss_fuzz_build.sh"; \ - exit 1; \ - } -endif - +verify: fmt vet manifests api-docs tidy @if [ ! "$$(git status --porcelain --untracked-files=no)" = "" ]; then \ echo "working directory is dirty:"; \ git --no-pager diff; \ @@ -224,38 +188,33 @@ TMP_DIR=$$(mktemp -d) ;\ cd $$TMP_DIR ;\ go mod init tmp ;\ echo "Downloading $(2)" ;\ -env -i bash -c "GOBIN=$(GOBIN) PATH=$(PATH) GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\ +env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\ rm -rf $$TMP_DIR ;\ } endef -# Build fuzzers -fuzz-build: $(LIBGIT2) - rm -rf $(BUILD_DIR)/fuzz/ - mkdir -p $(BUILD_DIR)/fuzz/out/ +# Build fuzzers used by oss-fuzz. +fuzz-build: + rm -rf $(shell pwd)/build/fuzz/ + mkdir -p $(shell pwd)/build/fuzz/out/ docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder docker run --rm \ -e FUZZING_LANGUAGE=go -e SANITIZER=address \ -e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \ - -v "$(BUILD_DIR)/fuzz/out":/out \ + -v "$(shell pwd)/build/fuzz/out":/out \ local-fuzzing:latest +# Run each fuzzer once to ensure they will work when executed by oss-fuzz. fuzz-smoketest: fuzz-build docker run --rm \ - -v "$(BUILD_DIR)/fuzz/out":/out \ + -v "$(shell pwd)/build/fuzz/out":/out \ -v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \ local-fuzzing:latest \ bash -c "/runner.sh" -# Creates an env file that can be used to load all source-controller's dependencies -# this is handy when you want to run adhoc debug sessions on tests or start the -# controller in a new debug session. 
-env: $(LIBGIT2) - echo 'GO_ENABLED="1"' > $(BUILD_DIR)/.env - echo 'PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"' >> $(BUILD_DIR)/.env - echo 'LIBRARY_PATH="$(LIBRARY_PATH)"' >> $(BUILD_DIR)/.env - echo 'CGO_CFLAGS="$(CGO_CFLAGS)"' >> $(BUILD_DIR)/.env - echo 'CGO_LDFLAGS="$(CGO_LDFLAGS)"' >> $(BUILD_DIR)/.env - echo 'KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS)' >> $(BUILD_DIR)/.env - echo 'GIT_CONFIG_GLOBAL=/dev/null' >> $(BUILD_DIR)/.env +# Run fuzz tests for the duration set in FUZZ_TIME. +fuzz-native: + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ + FUZZ_TIME=$(FUZZ_TIME) \ + ./tests/fuzz/native_go_run.sh diff --git a/PROJECT b/PROJECT index 10d980ac1..9d89d81be 100644 --- a/PROJECT +++ b/PROJECT @@ -1,12 +1,21 @@ domain: toolkit.fluxcd.io repo: github.com/fluxcd/source-controller resources: +- group: source + kind: GitRepository + version: v1 - group: source kind: GitRepository version: v1beta2 +- group: source + kind: HelmRepository + version: v1 - group: source kind: HelmRepository version: v1beta2 +- group: source + kind: HelmChart + version: v1 - group: source kind: HelmChart version: v1beta2 @@ -28,4 +37,13 @@ resources: - group: source kind: OCIRepository version: v1beta2 +- group: source + kind: Bucket + version: v1 +- group: source + kind: OCIRepository + version: v1 +- group: source + kind: ExternalArtifact + version: v1 version: "2" diff --git a/README.md b/README.md index 5f9a3f930..6f07b2e00 100644 --- a/README.md +++ b/README.md @@ -5,23 +5,49 @@ [![report](https://goreportcard.com/badge/github.com/fluxcd/source-controller)](https://goreportcard.com/report/github.com/fluxcd/source-controller) [![license](https://img.shields.io/github/license/fluxcd/source-controller.svg)](https://github.com/fluxcd/source-controller/blob/main/LICENSE) [![release](https://img.shields.io/github/release/fluxcd/source-controller/all.svg)](https://github.com/fluxcd/source-controller/releases) - + The 
source-controller is a Kubernetes operator, specialised in artifacts acquisition -from external sources such as Git, Helm repositories and S3 buckets. +from external sources such as Git, OCI, Helm repositories and S3-compatible buckets. The source-controller implements the -[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/main/docs/spec/v1beta2) API -and is a core component of the [GitOps toolkit](https://fluxcd.io/docs/components/). +[source.toolkit.fluxcd.io](docs/spec/README.md) API +and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/). ![overview](docs/diagrams/source-controller-overview.png) -Features: +## APIs + +| Kind | API Version | +|----------------------------------------------------|-------------------------------| +| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | +| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | + +## Features -* authenticates to sources (SSH, user/password, API token) -* validates source authenticity (PGP) +* authenticates to sources (SSH, user/password, API token, Workload Identity) +* validates source authenticity (PGP, Cosign, Notation) * detects source changes based on update policies (semver) * fetches resources on-demand and on-a-schedule * packages the fetched resources into a well-known format (tar.gz, yaml) * makes the artifacts addressable by their source identifier (sha, version, ts) * makes the artifacts available in-cluster to interested 3rd parties * notifies interested 3rd parties of source changes and availability (status conditions, events, hooks) -* reacts to Git push and Helm chart upload events (via 
[notification-controller](https://github.com/fluxcd/notification-controller)) +* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller)) + +## Guides + +* [Get started with Flux](https://fluxcd.io/flux/get-started/) +* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/) +* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/) +* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/) + +## Roadmap + +The roadmap for the Flux family of projects can be found at . + +## Contributing + +This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests. +To start contributing please see the [development guide](DEVELOPMENT.md). diff --git a/api/go.mod b/api/go.mod index 790a076de..3d821f349 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,30 +1,34 @@ module github.com/fluxcd/source-controller/api -go 1.18 +go 1.25.0 require ( - github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.14.2 - k8s.io/apimachinery v0.24.1 - sigs.k8s.io/controller-runtime v0.11.2 + github.com/fluxcd/pkg/apis/acl v0.9.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + k8s.io/apimachinery v0.34.0 + sigs.k8s.io/controller-runtime v0.22.1 ) // Fix CVE-2022-28948 replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 require ( - github.com/go-logr/logr v1.2.2 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/text v0.3.7 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/text v0.28.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index b311f6dce..1aa815d66 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,252 +1,118 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= -github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.14.2 h1:/Hf7I/Vz01vv3m7Qx7DtQvrzAL1oVt0MJcLb/I1Y1HE= -github.com/fluxcd/pkg/apis/meta v0.14.2/go.mod h1:ijZ61VG/8T3U17gj0aFL3fdtZL+mulD6V8VrLLUCAgM= -github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod 
h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/google/pprof 
v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA= -k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= -k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA= -sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod 
h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go new file mode 100644 index 000000000..bbedcefb3 --- /dev/null +++ b/api/v1/bucket_types.go @@ -0,0 +1,281 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = "generic" + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service + // and workload identity authentication. + BucketProviderAmazon string = "aws" + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = "gcp" + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = "azure" +) + +// BucketSpec specifies the required configuration to produce an Artifact for +// an object storage bucket. 
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.serviceAccountName)", message="ServiceAccountName is not supported for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.secretRef) || !has(self.serviceAccountName)", message="cannot set both .spec.secretRef and .spec.serviceAccountName" +type BucketSpec struct { + // Provider of the object storage bucket. + // Defaults to 'generic', which expects an S3 (API) compatible object + // storage. + // +kubebuilder:validation:Enum=generic;aws;gcp;azure + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // BucketName is the name of the object storage bucket. + // +required + BucketName string `json:"bucketName"` + + // Endpoint is the object storage address the BucketName is located at. 
+ // +required + Endpoint string `json:"endpoint"` + + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP Endpoint. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // Region of the Endpoint where the BucketName is located in. + // +optional + Region string `json:"region,omitempty"` + + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + + // SecretRef specifies the Secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the bucket. This field is only supported for the 'gcp' and 'aws' providers. + // For more information about workload identity: + // https://fluxcd.io/flux/components/source/buckets/#workload-identity + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. 
+ // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for fetch operations, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // Bucket. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. 
+ // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + +// BucketStatus records the observed state of a Bucket. +type BucketStatus struct { + // ObservedGeneration is the last observed generation of the Bucket object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful Bucket reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. 
+ // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason signals that the Bucket listing and fetch + // operations succeeded. + BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason signals that the Bucket listing or fetch + // operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in *Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in *Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// Bucket is the Schema for the buckets API. 
+type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// BucketList contains a list of Bucket objects. +// +kubebuilder:object:root=true +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1/condition_types.go b/api/v1/condition_types.go new file mode 100644 index 000000000..9641db99c --- /dev/null +++ b/api/v1/condition_types.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const SourceFinalizer = "finalizers.fluxcd.io" + +const ( + // ArtifactInStorageCondition indicates the availability of the Artifact in + // the storage. + // If True, the Artifact is stored successfully. + // This Condition is only present on the resource if the Artifact is + // successfully stored. + ArtifactInStorageCondition string = "ArtifactInStorage" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source + // is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. 
+ ArtifactOutdatedCondition string = "ArtifactOutdated" + + // SourceVerifiedCondition indicates the integrity verification of the + // Source. + // If True, the integrity check succeeded. If False, it failed. + // This Condition is only present on the resource if the integrity check + // is enabled. + SourceVerifiedCondition string = "SourceVerified" + + // FetchFailedCondition indicates a transient or persistent fetch failure + // of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, + // and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" + + // BuildFailedCondition indicates a transient or persistent build failure + // of a Source's Artifact. + // If True, the Source can be in an ArtifactOutdatedCondition. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + BuildFailedCondition string = "BuildFailed" + + // StorageOperationFailedCondition indicates a transient or persistent + // failure related to storage. If True, the reconciliation failed while + // performing some filesystem operation. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + StorageOperationFailedCondition string = "StorageOperationFailed" +) + +// Reasons are provided as utility, and not part of the declarative API. +const ( + // URLInvalidReason signals that a given Source has an invalid URL. + URLInvalidReason string = "URLInvalid" + + // AuthenticationFailedReason signals that a Secret does not have the + // required fields, or the provided credentials do not match. + AuthenticationFailedReason string = "AuthenticationFailed" + + // VerificationError signals that the Source's verification + // check failed. 
+ VerificationError string = "VerificationError" + + // DirCreationFailedReason signals a failure caused by a directory creation + // operation. + DirCreationFailedReason string = "DirectoryCreationFailed" + + // StatOperationFailedReason signals a failure caused by a stat operation on + // a path. + StatOperationFailedReason string = "StatOperationFailed" + + // ReadOperationFailedReason signals a failure caused by a read operation. + ReadOperationFailedReason string = "ReadOperationFailed" + + // AcquireLockFailedReason signals a failure in acquiring lock. + AcquireLockFailedReason string = "AcquireLockFailed" + + // InvalidPathReason signals a failure caused by an invalid path. + InvalidPathReason string = "InvalidPath" + + // ArchiveOperationFailedReason signals a failure in archive operation. + ArchiveOperationFailedReason string = "ArchiveOperationFailed" + + // SymlinkUpdateFailedReason signals a failure in updating a symlink. + SymlinkUpdateFailedReason string = "SymlinkUpdateFailed" + + // ArtifactUpToDateReason signals that an existing Artifact is up-to-date + // with the Source. + ArtifactUpToDateReason string = "ArtifactUpToDate" + + // CacheOperationFailedReason signals a failure in cache operation. + CacheOperationFailedReason string = "CacheOperationFailed" + + // PatchOperationFailedReason signals a failure in patching a kubernetes API + // object. + PatchOperationFailedReason string = "PatchOperationFailed" + + // InvalidSTSConfigurationReason signals that the STS configuration is invalid. + InvalidSTSConfigurationReason string = "InvalidSTSConfiguration" + + // InvalidProviderConfigurationReason signals that the provider + // configuration is invalid. 
+ InvalidProviderConfigurationReason string = "InvalidProviderConfiguration" +) diff --git a/pkg/git/gogit/gogit.go b/api/v1/doc.go similarity index 73% rename from pkg/git/gogit/gogit.go rename to api/v1/doc.go index 2ce0a8649..a06b2174b 100644 --- a/pkg/git/gogit/gogit.go +++ b/api/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Flux authors +Copyright 2023 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package gogit - -import "github.com/fluxcd/source-controller/pkg/git" - -const ( - Implementation git.Implementation = "go-git" -) +// Package v1 contains API Schema definitions for the source v1 API group +// +kubebuilder:object:generate=true +// +groupName=source.toolkit.fluxcd.io +package v1 diff --git a/api/v1/externalartifact_types.go b/api/v1/externalartifact_types.go new file mode 100644 index 000000000..e338b733b --- /dev/null +++ b/api/v1/externalartifact_types.go @@ -0,0 +1,97 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// ExternalArtifactKind is the string representation of the ExternalArtifact. 
+const ExternalArtifactKind = "ExternalArtifact" + +// ExternalArtifactSpec defines the desired state of ExternalArtifact +type ExternalArtifactSpec struct { + // SourceRef points to the Kubernetes custom resource for + // which the artifact is generated. + // +optional + SourceRef *meta.NamespacedObjectKindReference `json:"sourceRef,omitempty"` +} + +// ExternalArtifactStatus defines the observed state of ExternalArtifact +type ExternalArtifactStatus struct { + // Artifact represents the output of an ExternalArtifact reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // Conditions holds the conditions for the ExternalArtifact. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// GetConditions returns the status conditions of the object. +func (in *ExternalArtifact) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *ExternalArtifact) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetArtifact returns the latest Artifact from the ExternalArtifact if +// present in the status sub-resource. +func (in *ExternalArtifact) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetRequeueAfter returns the duration after which the ExternalArtifact +// must be reconciled again. 
+func (in *ExternalArtifact) GetRequeueAfter() time.Duration { + return time.Minute +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceRef.name",description="" + +// ExternalArtifact is the Schema for the external artifacts API +type ExternalArtifact struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalArtifactSpec `json:"spec,omitempty"` + Status ExternalArtifactStatus `json:"status,omitempty"` +} + +// ExternalArtifactList contains a list of ExternalArtifact +// +kubebuilder:object:root=true +type ExternalArtifactList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalArtifact `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalArtifact{}, &ExternalArtifactList{}) +} diff --git a/api/v1/gitrepository_types.go b/api/v1/gitrepository_types.go new file mode 100644 index 000000000..f104fd0f1 --- /dev/null +++ b/api/v1/gitrepository_types.go @@ -0,0 +1,384 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // GitRepositoryKind is the string representation of a GitRepository. + GitRepositoryKind = "GitRepository" + + // GitProviderGeneric provides support for authentication using + // credentials specified in secretRef. + GitProviderGeneric string = "generic" + + // GitProviderAzure provides support for authentication to azure + // repositories using Managed Identity. + GitProviderAzure string = "azure" + + // GitProviderGitHub provides support for authentication to git + // repositories using GitHub App authentication + GitProviderGitHub string = "github" +) + +const ( + // IncludeUnavailableCondition indicates one of the includes is not + // available. For example, because it does not exist, or does not have an + // Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" +) + +// GitVerificationMode specifies the verification mode for a Git repository. +type GitVerificationMode string + +// Valid checks the validity of the Git verification mode. +func (m GitVerificationMode) Valid() bool { + switch m { + case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD: + return true + default: + return false + } +} + +const ( + // ModeGitHEAD implies that the HEAD of the Git repository (after it has been + // checked out to the required commit) should be verified. + ModeGitHEAD GitVerificationMode = "HEAD" + // ModeGitTag implies that the tag object specified in the checkout configuration + // should be verified. + ModeGitTag GitVerificationMode = "Tag" + // ModeGitTagAndHEAD implies that both the tag object and the commit it points + // to should be verified. 
+ ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD" +) + +// GitRepositorySpec specifies the required configuration to produce an +// Artifact for a Git repository. +// +kubebuilder:validation:XValidation:rule="!has(self.serviceAccountName) || (has(self.provider) && self.provider == 'azure')",message="serviceAccountName can only be set when provider is 'azure'" +type GitRepositorySpec struct { + // URL specifies the Git repository URL, it can be an HTTP/S or SSH address. + // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials for + // the GitRepository. + // For HTTPS repositories the Secret must contain 'username' and 'password' + // fields for basic auth or 'bearerToken' field for token auth. + // For SSH repositories the Secret must contain 'identity' + // and 'known_hosts' fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Provider used for authentication, can be 'azure', 'github', 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;azure;github + // +optional + Provider string `json:"provider,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to + // authenticate to the GitRepository. This field is only supported for 'azure' provider. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // Interval at which the GitRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for Git operations like cloning, defaults to 60s. 
+ // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Reference specifies the Git reference to resolve and monitor for + // changes, defaults to the 'master' branch. + // +optional + Reference *GitRepositoryRef `json:"ref,omitempty"` + + // Verification specifies the configuration to verify the Git commit + // signature(s). + // +optional + Verification *GitRepositoryVerification `json:"verify,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Git server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // GitRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // RecurseSubmodules enables the initialization of all submodules within + // the GitRepository as cloned from the URL, using their default settings. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Include specifies a list of GitRepository resources which Artifacts + // should be included in the Artifact produced for this GitRepository. + // +optional + Include []GitRepositoryInclude `json:"include,omitempty"` + + // SparseCheckout specifies a list of directories to checkout when cloning + // the repository. If specified, only these directories are included in the + // Artifact produced for this GitRepository. 
+ // +optional + SparseCheckout []string `json:"sparseCheckout,omitempty"` +} + +// GitRepositoryInclude specifies a local reference to a GitRepository which +// Artifact (sub-)contents must be included, and where they should be placed. +type GitRepositoryInclude struct { + // GitRepositoryRef specifies the GitRepository which Artifact contents + // must be included. + // +required + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // FromPath specifies the path to copy contents from, defaults to the root + // of the Artifact. + // +optional + FromPath string `json:"fromPath,omitempty"` + + // ToPath specifies the path to copy contents to, defaults to the name of + // the GitRepositoryRef. + // +optional + ToPath string `json:"toPath,omitempty"` +} + +// GetFromPath returns the specified FromPath. +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +// GetToPath returns the specified ToPath, falling back to the name of the +// GitRepositoryRef. +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryRef specifies the Git reference to resolve and checkout. +type GitRepositoryRef struct { + // Branch to check out, defaults to 'master' if no other field is defined. + // +optional + Branch string `json:"branch,omitempty"` + + // Tag to check out, takes precedence over Branch. + // +optional + Tag string `json:"tag,omitempty"` + + // SemVer tag expression to check out, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // Name of the reference to check out; takes precedence over Branch, Tag and SemVer. 
+ // + // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" + // +optional + Name string `json:"name,omitempty"` + + // Commit SHA to check out, takes precedence over all reference fields. + // + // This can be combined with Branch to shallow clone the branch, in which + // the commit is expected to exist. + // +optional + Commit string `json:"commit,omitempty"` +} + +// GitRepositoryVerification specifies the Git commit signature verification +// strategy. +type GitRepositoryVerification struct { + // Mode specifies which Git object(s) should be verified. + // + // The variants "head" and "HEAD" both imply the same thing, i.e. verify + // the commit that the HEAD of the Git repository points to. The variant + // "head" solely exists to ensure backwards compatibility. + // +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD + // +optional + // +kubebuilder:default:=HEAD + Mode GitVerificationMode `json:"mode,omitempty"` + + // SecretRef specifies the Secret containing the public keys of trusted Git + // authors. + // +required + SecretRef meta.LocalObjectReference `json:"secretRef"` +} + +// GitRepositoryStatus records the observed state of a Git repository. +type GitRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the GitRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the GitRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Artifact represents the last successful GitRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // IncludedArtifacts contains a list of the last successfully included + // Artifacts as instructed by GitRepositorySpec.Include. 
+ // +optional + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedRecurseSubmodules is the observed resource submodules + // configuration used to produce the current Artifact. + // +optional + ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"` + + // ObservedInclude is the observed list of GitRepository resources used to + // produce the current Artifact. + // +optional + ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + + // ObservedSparseCheckout is the observed list of directories used to + // produce the current Artifact. + // +optional + ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"` + + // SourceVerificationMode is the last used verification mode indicating + // which Git object(s) have been verified. + // +optional + SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // GitOperationSucceedReason signals that a Git operation (e.g. clone, + // checkout, etc.) succeeded. + GitOperationSucceedReason string = "GitOperationSucceeded" + + // GitOperationFailedReason signals that a Git operation (e.g. clone, + // checkout, etc.) failed. + GitOperationFailedReason string = "GitOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the GitRepository must be +// reconciled again. 
+func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the GitRepository if present in +// the status sub-resource. +func (in *GitRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetProvider returns the Git authentication provider. +func (v *GitRepository) GetProvider() string { + if v.Spec.Provider == "" { + return GitProviderGeneric + } + return v.Spec.Provider +} + +// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default. +func (v *GitRepositoryVerification) GetMode() GitVerificationMode { + if v.Mode.Valid() { + return v.Mode + } + return ModeGitHEAD +} + +// VerifyHEAD returns if the configured mode instructs verification of the +// Git HEAD. +func (v *GitRepositoryVerification) VerifyHEAD() bool { + return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD +} + +// VerifyTag returns if the configured mode instructs verification of the +// Git tag. +func (v *GitRepositoryVerification) VerifyTag() bool { + return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=gitrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// GitRepository is the Schema for the gitrepositories API. 
+type GitRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status GitRepositoryStatus `json:"status,omitempty"` +} + +// GitRepositoryList contains a list of GitRepository objects. +// +kubebuilder:object:root=true +type GitRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GitRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{}) +} diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go new file mode 100644 index 000000000..b539a7947 --- /dev/null +++ b/api/v1/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/helmchart_types.go b/api/v1/helmchart_types.go new file mode 100644 index 000000000..23cb24146 --- /dev/null +++ b/api/v1/helmchart_types.go @@ -0,0 +1,227 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// HelmChartKind is the string representation of a HelmChart. +const HelmChartKind = "HelmChart" + +// HelmChartSpec specifies the desired state of a Helm chart. +type HelmChartSpec struct { + // Chart is the name or path the Helm chart is available at in the + // SourceRef. + // +required + Chart string `json:"chart"` + + // Version is the chart version semver expression, ignored for charts from + // GitRepository and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // SourceRef is the reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // Interval at which the HelmChart SourceRef is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // ReconcileStrategy determines what enables the creation of a new artifact. + // Valid values are ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // ValuesFiles is an alternative list of values files to use as the chart + // values (values.yaml is not included by default), expected to be a + // relative path in the SourceRef. + // Values files are merged in the order of this list with the last file + // overriding the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // This field is only supported when using HelmRepository source with spec.type 'oci'. + // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. 
+	ReconcileStrategyChartVersion string = "ChartVersion"
+
+	// ReconcileStrategyRevision reconciles when the Revision of the source is different.
+	ReconcileStrategyRevision string = "Revision"
+)
+
+// LocalHelmChartSourceReference contains enough information to let you locate
+// the typed referenced object at namespace level.
+type LocalHelmChartSourceReference struct {
+	// APIVersion of the referent.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+
+	// Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
+	// 'Bucket').
+	// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
+	// +required
+	Kind string `json:"kind"`
+
+	// Name of the referent.
+	// +required
+	Name string `json:"name"`
+}
+
+// HelmChartStatus records the observed state of the HelmChart.
+type HelmChartStatus struct {
+	// ObservedGeneration is the last observed generation of the HelmChart
+	// object.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// ObservedSourceArtifactRevision is the last observed Artifact.Revision
+	// of the HelmChartSpec.SourceRef.
+	// +optional
+	ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
+
+	// ObservedChartName is the last observed chart name as specified by the
+	// resolved chart reference.
+	// +optional
+	ObservedChartName string `json:"observedChartName,omitempty"`
+
+	// ObservedValuesFiles are the observed value files of the last successful
+	// reconciliation.
+	// It matches the chart in the last successfully reconciled artifact.
+	// +optional
+	ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
+
+	// Conditions holds the conditions for the HelmChart.
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// URL is the dynamic fetch link for the latest Artifact.
+	// It is provided on a "best effort" basis, and using the precise
+	// HelmChartStatus.Artifact data is recommended.
+ // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullSucceededReason signals that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageSucceededReason signals that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmChart) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + return in.Spec.ValuesFiles +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmChart is the Schema for the helmcharts API. +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// HelmChartList contains a list of HelmChart objects. +// +kubebuilder:object:root=true +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, &HelmChartList{}) +} diff --git a/api/v1/helmrepository_types.go b/api/v1/helmrepository_types.go new file mode 100644 index 000000000..1c19064a5 --- /dev/null +++ b/api/v1/helmrepository_types.go @@ -0,0 +1,228 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository + // objects by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" + // HelmRepositoryTypeDefault is the default HelmRepository type. + // It is used when no type is specified and corresponds to a Helm repository. + HelmRepositoryTypeDefault = "default" + // HelmRepositoryTypeOCI is the type for an OCI repository. + HelmRepositoryTypeOCI = "oci" +) + +// HelmRepositorySpec specifies the required configuration to produce an +// Artifact for a Helm repository index YAML. +type HelmRepositorySpec struct { + // URL of the Helm repository, a valid URL contains at least a protocol and + // host. + // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials + // for the HelmRepository. + // For HTTP/S basic auth the secret must contain 'username' and 'password' + // fields. + // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + // keys is deprecated. Please use `.spec.certSecretRef` instead. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // It takes precedence over the values specified in the Secret referred + // to by `.spec.secretRef`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed + // on to a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the + // index differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result + // in credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // Interval at which the HelmRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval metav1.Duration `json:"interval,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // This field is only taken into account if the .spec.type field is set to 'oci'. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Timeout is used for the index fetch operation for an HTTPS helm repository, + // and for remote OCI Repository operations like pulling for an OCI helm + // chart by the associated HelmChart. + // Its default value is 60s. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // HelmRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Type of the HelmRepository. + // When this field is set to "oci", the URL field value must be prefixed with "oci://". + // +kubebuilder:validation:Enum=default;oci + // +optional + Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` +} + +// HelmRepositoryStatus records the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the HelmRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. 
+ // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // HelmRepositoryStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful HelmRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason signals that the HelmRepository index fetch + // failed. + IndexationFailedReason string = "IndexationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + if in.Spec.Interval.Duration != 0 { + return in.Spec.Interval.Duration + } + return time.Minute +} + +// GetTimeout returns the timeout duration used for various operations related +// to this HelmRepository. +func (in HelmRepository) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return time.Minute +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. 
+func (in *HelmRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmRepository is the Schema for the helmrepositories API. +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository objects. +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1/ocirepository_types.go b/api/v1/ocirepository_types.go new file mode 100644 index 000000000..8c4d3f0fc --- /dev/null +++ b/api/v1/ocirepository_types.go @@ -0,0 +1,296 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+	// OCIRepositoryKind is the string representation of an OCIRepository.
+	OCIRepositoryKind = "OCIRepository"
+
+	// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
+	OCIRepositoryPrefix = "oci://"
+
+	// GenericOCIProvider provides support for authentication using static credentials
+	// for any OCI compatible API such as Docker Registry, GitHub Container Registry,
+	// Docker Hub, Quay, etc.
+	GenericOCIProvider string = "generic"
+
+	// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
+	AmazonOCIProvider string = "aws"
+
+	// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
+	GoogleOCIProvider string = "gcp"
+
+	// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
+	// Managed Identity or Shared Key.
+	AzureOCIProvider string = "azure"
+
+	// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
+	OCILayerExtract = "extract"
+
+	// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
+	OCILayerCopy = "copy"
+)
+
+// OCIRepositorySpec defines the desired state of OCIRepository
+type OCIRepositorySpec struct {
+	// URL is a reference to an OCI artifact repository hosted
+	// on a remote container registry.
+ // +kubebuilder:validation:Pattern="^oci://.*$" + // +required + URL string `json:"url"` + + // The OCI reference to pull and monitor for changes, + // defaults to the latest tag. + // +optional + Reference *OCIRepositoryRef `json:"ref,omitempty"` + + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // SecretRef contains the secret name containing the registry login + // credentials to resolve image metadata. + // The secret must be of type kubernetes.io/dockerconfigjson. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the image pull if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote OCI Repository operations like pulling, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. 
+ // +optional + Ignore *string `json:"ignore,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// OCIRepositoryRef defines the image reference for the OCIRepository's URL +type OCIRepositoryRef struct { + // Digest is the image digest to pull, takes precedence over SemVer. + // The value should be in the format 'sha256:<HASH>'. + // +optional + Digest string `json:"digest,omitempty"` + + // SemVer is the range of tags to pull selecting the latest within + // the range, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + + // Tag is the image tag to pull, defaults to latest. + // +optional + Tag string `json:"tag,omitempty"` +} + +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` + + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. + // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. + // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` +} + +// OCIRepositoryStatus defines the observed state of OCIRepository +type OCIRepositoryStatus struct { + // ObservedGeneration is the last observed generation.
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the OCIRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last OCI Repository sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful OCI Repository sync. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // OCIPullFailedReason signals that a pull operation failed. + OCIPullFailedReason string = "OCIArtifactPullFailed" + + // OCILayerOperationFailedReason signals that an OCI layer operation failed. + OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in OCIRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *OCIRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the OCIRepository must be +// reconciled again. +func (in OCIRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the OCIRepository if present in +// the status sub-resource. 
+func (in *OCIRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). +func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ocirepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// OCIRepository is the Schema for the ocirepositories API +type OCIRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OCIRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status OCIRepositoryStatus `json:"status,omitempty"` +} + +// OCIRepositoryList contains a list of OCIRepository +// +kubebuilder:object:root=true +type OCIRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OCIRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{}) +} diff --git a/api/v1/ociverification_types.go b/api/v1/ociverification_types.go new file mode 100644 index 
000000000..de74be343 --- /dev/null +++ b/api/v1/ociverification_types.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/fluxcd/pkg/apis/meta" +) + +// OCIRepositoryVerification verifies the authenticity of an OCI Artifact +type OCIRepositoryVerification struct { + // Provider specifies the technology used to sign the OCI Artifact. + // +kubebuilder:validation:Enum=cosign;notation + // +kubebuilder:default:=cosign + Provider string `json:"provider"` + + // SecretRef specifies the Kubernetes Secret containing the + // trusted public keys. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // MatchOIDCIdentity specifies the identity matching criteria to use + // while verifying an OCI artifact which was signed using Cosign keyless + // signing. The artifact's identity is deemed to be verified if any of the + // specified matchers match against the identity. + // +optional + MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"` +} + +// OIDCIdentityMatch specifies options for verifying the certificate identity, +// i.e. the issuer and the subject of the certificate. +type OIDCIdentityMatch struct { + // Issuer specifies the regex pattern to match against to verify + // the OIDC issuer in the Fulcio certificate. The pattern must be a + // valid Go regular expression. 
+ // +required + Issuer string `json:"issuer"` + // Subject specifies the regex pattern to match against to verify + // the identity subject in the Fulcio certificate. The pattern must + // be a valid Go regular expression. + // +required + Subject string `json:"subject"` +} diff --git a/api/v1/source.go b/api/v1/source.go new file mode 100644 index 000000000..d879f6034 --- /dev/null +++ b/api/v1/source.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // SourceIndexKey is the key used for indexing objects based on their + // referenced Source. + SourceIndexKey string = ".metadata.source" +) + +// Source interface must be supported by all API types. +// Source is the interface that provides generic access to the Artifact and +// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io +// API group. +// +// +k8s:deepcopy-gen=false +type Source interface { + runtime.Object + // GetRequeueAfter returns the duration after which the source must be + // reconciled again. + GetRequeueAfter() time.Duration + // GetArtifact returns the latest artifact from the source if present in + // the status sub-resource. 
+ GetArtifact() *meta.Artifact +} diff --git a/pkg/git/libgit2/libgit2.go b/api/v1/sts_types.go similarity index 56% rename from pkg/git/libgit2/libgit2.go rename to api/v1/sts_types.go index e705e6b0a..4b1d05881 100644 --- a/pkg/git/libgit2/libgit2.go +++ b/api/v1/sts_types.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Flux authors +Copyright 2024 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package libgit2 - -import "github.com/fluxcd/source-controller/pkg/git" +package v1 const ( - Implementation git.Implementation = "libgit2" + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" ) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..14f1ba3c2 --- /dev/null +++ b/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,998 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. 
+func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifact) DeepCopyInto(out *ExternalArtifact) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifact. +func (in *ExternalArtifact) DeepCopy() *ExternalArtifact { + if in == nil { + return nil + } + out := new(ExternalArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExternalArtifact) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactList) DeepCopyInto(out *ExternalArtifactList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalArtifact, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactList. +func (in *ExternalArtifactList) DeepCopy() *ExternalArtifactList { + if in == nil { + return nil + } + out := new(ExternalArtifactList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalArtifactList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactSpec) DeepCopyInto(out *ExternalArtifactSpec) { + *out = *in + if in.SourceRef != nil { + in, out := &in.SourceRef, &out.SourceRef + *out = new(meta.NamespacedObjectKindReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactSpec. +func (in *ExternalArtifactSpec) DeepCopy() *ExternalArtifactSpec { + if in == nil { + return nil + } + out := new(ExternalArtifactSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalArtifactStatus) DeepCopyInto(out *ExternalArtifactStatus) { + *out = *in + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactStatus. +func (in *ExternalArtifactStatus) DeepCopy() *ExternalArtifactStatus { + if in == nil { + return nil + } + out := new(ExternalArtifactStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepository) DeepCopyInto(out *GitRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository. +func (in *GitRepository) DeepCopy() *GitRepository { + if in == nil { + return nil + } + out := new(GitRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. 
+func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GitRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList. +func (in *GitRepositoryList) DeepCopy() *GitRepositoryList { + if in == nil { + return nil + } + out := new(GitRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef. +func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef { + if in == nil { + return nil + } + out := new(GitRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(GitRepositoryRef) + **out = **in + } + if in.Verification != nil { + in, out := &in.Verification, &out.Verification + *out = new(GitRepositoryVerification) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.SparseCheckout != nil { + in, out := &in.SparseCheckout, &out.SparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. +func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec { + if in == nil { + return nil + } + out := new(GitRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*meta.Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + } + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedInclude != nil { + in, out := &in.ObservedInclude, &out.ObservedInclude + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.ObservedSparseCheckout != nil { + in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceVerificationMode != nil { + in, out := &in.SourceVerificationMode, &out.SourceVerificationMode + *out = new(GitVerificationMode) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus. +func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus { + if in == nil { + return nil + } + out := new(GitRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification. +func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { + if in == nil { + return nil + } + out := new(GitRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. 
+func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. +func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository. +func (in *OCIRepository) DeepCopy() *OCIRepository { + if in == nil { + return nil + } + out := new(OCIRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList. +func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList { + if in == nil { + return nil + } + out := new(OCIRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef. 
+func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef { + if in == nil { + return nil + } + out := new(OCIRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(OCIRepositoryRef) + **out = **in + } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec. +func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec { + if in == nil { + return nil + } + out := new(OCIRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus. +func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { + if in == nil { + return nil + } + out := new(OCIRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.MatchOIDCIdentity != nil { + in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity + *out = make([]OIDCIdentityMatch, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification. +func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification { + if in == nil { + return nil + } + out := new(OCIRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch. +func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch { + if in == nil { + return nil + } + out := new(OIDCIdentityMatch) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 0d5f3de81..e64321c9d 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -193,13 +193,8 @@ func (in *Bucket) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // Bucket is the Schema for the buckets API type Bucket struct { diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go index 7a768a45d..f604a2624 100644 --- a/api/v1beta1/doc.go +++ b/api/v1beta1/doc.go @@ -15,6 +15,9 @@ limitations under the License. */ // Package v1beta1 contains API Schema definitions for the source v1beta1 API group +// +// Deprecated: v1beta1 is no longer supported, use v1 instead. 
+// // +kubebuilder:object:generate=true // +groupName=source.toolkit.fluxcd.io package v1beta1 diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c84055e03..05cce7c60 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -265,14 +265,9 @@ func (in *GitRepository) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=gitrepo -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // GitRepository is the Schema for the gitrepositories API type GitRepository struct { diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 8d4c0a02d..22e5dda58 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -231,17 +231,9 @@ func (in *HelmChart) GetValuesFiles() []string { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=hc -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` -// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` -// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` -// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// 
+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmChart is the Schema for the helmcharts API type HelmChart struct { diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 62b0e9a6d..4530b82a9 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -43,7 +43,7 @@ type HelmRepositorySpec struct { // For HTTP/S basic auth the secret must contain username and // password fields. // For TLS the secret must contain a certFile and keyFile, and/or - // caCert fields. + // caFile fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -181,14 +181,9 @@ func (in *HelmRepository) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=helmrepo -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmRepository is the Schema for the helmrepositories API type HelmRepository struct { diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 3fd54793d..10be7301e 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,8 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* -Copyright 2022 The Flux authors +Copyright 2025 
The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go index 0832b6ce5..cc88d2a0c 100644 --- a/api/v1beta2/artifact_types.go +++ b/api/v1beta2/artifact_types.go @@ -18,12 +18,16 @@ package v1beta2 import ( "path" + "regexp" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Artifact represents the output of a Source reconciliation. +// +// Deprecated: use Artifact from api/v1 instead. This type will be removed in +// a future release. type Artifact struct { // Path is the relative file path of the Artifact. It can be used to locate // the file in the root of the Artifact storage on the local file system of @@ -43,8 +47,14 @@ type Artifact struct { Revision string `json:"revision"` // Checksum is the SHA256 checksum of the Artifact file. + // Deprecated: use Artifact.Digest instead. // +optional - Checksum string `json:"checksum"` + Checksum string `json:"checksum,omitempty"` + + // Digest is the digest of the file in the form of ':'. + // +optional + // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$" + Digest string `json:"digest,omitempty"` // LastUpdateTime is the timestamp corresponding to the last update of the // Artifact. @@ -66,7 +76,7 @@ func (in *Artifact) HasRevision(revision string) bool { if in == nil { return false } - return in.Revision == revision + return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision) } // HasChecksum returns if the given checksum matches the current Checksum of @@ -90,3 +100,60 @@ func ArtifactDir(kind, namespace, name string) string { func ArtifactPath(kind, namespace, name, filename string) string { return path.Join(ArtifactDir(kind, namespace, name), filename) } + +// TransformLegacyRevision transforms a "legacy" revision string into a "new" +// revision string. 
It accepts the following formats: +// +// - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc +// - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd +// +// Which are transformed into the following formats respectively: +// +// - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc +// - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd +// +// Deprecated, this function exists for backwards compatibility with existing +// resources, and to provide a transition period. Will be removed in a future +// release. +func TransformLegacyRevision(rev string) string { + if rev != "" && strings.LastIndex(rev, ":") == -1 { + if i := strings.LastIndex(rev, "/"); i >= 0 { + sha := rev[i+1:] + if algo := determineSHAType(sha); algo != "" { + if name := rev[:i]; name != "HEAD" { + return name + "@" + algo + ":" + sha + } + return algo + ":" + sha + } + } + if algo := determineSHAType(rev); algo != "" { + return algo + ":" + rev + } + } + return rev +} + +// isAlphaNumHex returns true if the given string only contains 0-9 and a-f +// characters. +var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString + +// determineSHAType returns the SHA algorithm used to compute the provided hex. +// The determination is heuristic and based on the length of the hex string. If +// the size is not recognized, an empty string is returned. 
+func determineSHAType(hex string) string { + if isAlphaNumHex(hex) { + switch len(hex) { + case 40: + return "sha1" + case 64: + return "sha256" + } + } + return "" +} diff --git a/api/v1beta2/artifact_types_test.go b/api/v1beta2/artifact_types_test.go new file mode 100644 index 000000000..ccf578de3 --- /dev/null +++ b/api/v1beta2/artifact_types_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import "testing" + +func TestTransformLegacyRevision(t *testing.T) { + tests := []struct { + rev string + want string + }{ + { + rev: "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c", + want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c", + }, + { + rev: "v1.0.0", + want: "v1.0.0", + }, + { + rev: 
"v1.0.0-rc1", + want: "v1.0.0-rc1", + }, + { + rev: "v1.0.0-rc1+metadata", + want: "v1.0.0-rc1+metadata", + }, + { + rev: "arbitrary/revision", + want: "arbitrary/revision", + }, + { + rev: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx", + want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx", + }, + } + for _, tt := range tests { + t.Run(tt.rev, func(t *testing.T) { + if got := TransformLegacyRevision(tt.rev); got != tt.want { + t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 2ea66e465..6495abdd0 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -23,6 +23,8 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + + apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -31,22 +33,48 @@ const ( ) const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = apiv1.BucketProviderGeneric + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. + BucketProviderAmazon string = apiv1.BucketProviderAmazon + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = apiv1.BucketProviderGoogle + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = apiv1.BucketProviderAzure + // GenericBucketProvider for any S3 API compatible storage Bucket. - GenericBucketProvider string = "generic" + // + // Deprecated: use BucketProviderGeneric. + GenericBucketProvider string = apiv1.BucketProviderGeneric // AmazonBucketProvider for an AWS S3 object storage Bucket. 
// Provides support for retrieving credentials from the AWS EC2 service. - AmazonBucketProvider string = "aws" + // + // Deprecated: use BucketProviderAmazon. + AmazonBucketProvider string = apiv1.BucketProviderAmazon // GoogleBucketProvider for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. - GoogleBucketProvider string = "gcp" + // + // Deprecated: use BucketProviderGoogle. + GoogleBucketProvider string = apiv1.BucketProviderGoogle // AzureBucketProvider for an Azure Blob Storage Bucket. // Provides support for authentication using a Service Principal, // Managed Identity or Shared Key. - AzureBucketProvider string = "azure" + // + // Deprecated: use BucketProviderAzure. + AzureBucketProvider string = apiv1.BucketProviderAzure ) // BucketSpec specifies the required configuration to produce an Artifact for // an object storage bucket. +// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" type BucketSpec struct { // Provider of the object storage bucket. 
// Defaults to 'generic', which expects an S3 (API) compatible object @@ -64,6 +92,14 @@ type BucketSpec struct { // +required Endpoint string `json:"endpoint"` + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + // Insecure allows connecting to a non-TLS HTTP Endpoint. // +optional Insecure bool `json:"insecure,omitempty"` @@ -72,17 +108,49 @@ type BucketSpec struct { // +optional Region string `json:"region,omitempty"` + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + // SecretRef specifies the Secret containing authentication credentials // for the Bucket. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` - // Interval at which to check the Endpoint for updates. + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. 
+ // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" // +required Interval metav1.Duration `json:"interval"` // Timeout for fetch operations, defaults to 60s. // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -104,6 +172,45 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. + // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. 
The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + // BucketStatus records the observed state of a Bucket. type BucketStatus struct { // ObservedGeneration is the last observed generation of the Bucket object. @@ -122,7 +229,12 @@ type BucketStatus struct { // Artifact represents the last successful Bucket reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -153,15 +265,14 @@ func (in Bucket) GetRequeueAfter() time.Duration { } // GetArtifact returns the latest artifact from the source if present in the status sub-resource. 
-func (in *Bucket) GetArtifact() *Artifact { +func (in *Bucket) GetArtifact() *meta.Artifact { return in.Status.Artifact } // +genclient -// +genclient:Namespaced -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go index 711469eb8..2b93a1795 100644 --- a/api/v1beta2/condition_types.go +++ b/api/v1beta2/condition_types.go @@ -71,6 +71,10 @@ const ( // required fields, or the provided credentials do not match. AuthenticationFailedReason string = "AuthenticationFailed" + // VerificationError signals that the Source's verification + // check failed. + VerificationError string = "VerificationError" + // DirCreationFailedReason signals a failure caused by a directory creation // operation. DirCreationFailedReason string = "DirectoryCreationFailed" diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index de736c861..89beeb9a7 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -55,18 +55,22 @@ type GitRepositorySpec struct { // SecretRef specifies the Secret containing authentication credentials for // the GitRepository. // For HTTPS repositories the Secret must contain 'username' and 'password' - // fields. + // fields for basic auth or 'bearerToken' field for token auth. // For SSH repositories the Secret must contain 'identity' // and 'known_hosts' fields. 
// +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` // Interval at which to check the GitRepository for updates. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" // +required Interval metav1.Duration `json:"interval"` // Timeout for Git operations like cloning, defaults to 60s. // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -93,6 +97,8 @@ type GitRepositorySpec struct { // GitImplementation specifies which Git client library implementation to // use. Defaults to 'go-git', valid values are ('go-git', 'libgit2'). + // Deprecated: gitImplementation is deprecated now that 'go-git' is the + // only supported implementation. // +kubebuilder:validation:Enum=go-git;libgit2 // +kubebuilder:default:=go-git // +optional @@ -100,7 +106,6 @@ type GitRepositorySpec struct { // RecurseSubmodules enables the initialization of all submodules within // the GitRepository as cloned from the URL, using their default settings. - // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` @@ -150,9 +155,6 @@ func (in *GitRepositoryInclude) GetToPath() string { // GitRepositoryRef specifies the Git reference to resolve and checkout. type GitRepositoryRef struct { // Branch to check out, defaults to 'master' if no other field is defined. - // - // When GitRepositorySpec.GitImplementation is set to 'go-git', a shallow - // clone of the specified branch is performed. // +optional Branch string `json:"branch,omitempty"` @@ -164,11 +166,17 @@ type GitRepositoryRef struct { // +optional SemVer string `json:"semver,omitempty"` + // Name of the reference to check out; takes precedence over Branch, Tag and SemVer. 
+ // + // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" + // +optional + Name string `json:"name,omitempty"` + // Commit SHA to check out, takes precedence over all reference fields. // - // When GitRepositorySpec.GitImplementation is set to 'go-git', this can be - // combined with Branch to shallow clone the branch, in which the commit is - // expected to exist. + // This can be combined with Branch to shallow clone the branch, in which + // the commit is expected to exist. // +optional Commit string `json:"commit,omitempty"` } @@ -182,7 +190,7 @@ type GitRepositoryVerification struct { // SecretRef specifies the Secret containing the public keys of trusted Git // authors. - SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` + SecretRef meta.LocalObjectReference `json:"secretRef"` } // GitRepositoryStatus records the observed state of a Git repository. @@ -204,12 +212,12 @@ type GitRepositoryStatus struct { // Artifact represents the last successful GitRepository reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // IncludedArtifacts contains a list of the last successfully included // Artifacts as instructed by GitRepositorySpec.Include. // +optional - IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` // ContentConfigChecksum is a checksum of all the configurations related to // the content of the source artifact: @@ -220,9 +228,27 @@ type GitRepositoryStatus struct { // be used to determine if the content of the included repository has // changed. // It has the format of `:`, for example: `sha256:`. + // + // Deprecated: Replaced with explicit fields for observed artifact content + // config in the status. 
// +optional ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"` + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedRecurseSubmodules is the observed resource submodules + // configuration used to produce the current Artifact. + // +optional + ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"` + + // ObservedInclude is the observed list of GitRepository resources used to + // produce the current Artifact. + // +optional + ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + meta.ReconcileRequestStatus `json:",inline"` } @@ -254,16 +280,15 @@ func (in GitRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the GitRepository if present in // the status sub-resource. -func (in *GitRepository) GetArtifact() *Artifact { +func (in *GitRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } // +genclient -// +genclient:Namespaced -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=gitrepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 2ce5a942f..ac24b1c13 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -23,6 +23,8 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + + apiv1 
"github.com/fluxcd/source-controller/api/v1" ) // HelmChartKind is the string representation of a HelmChart. @@ -45,7 +47,11 @@ type HelmChartSpec struct { // +required SourceRef LocalHelmChartSourceReference `json:"sourceRef"` - // Interval is the interval at which to check the Source for updates. + // Interval at which the HelmChart SourceRef is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" // +required Interval metav1.Duration `json:"interval"` @@ -74,6 +80,11 @@ type HelmChartSpec struct { // +deprecated ValuesFile string `json:"valuesFile,omitempty"` + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this // source. // +optional @@ -84,6 +95,14 @@ type HelmChartSpec struct { // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 // +optional AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // This field is only supported when using HelmRepository source with spec.type 'oci'. + // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + // +optional + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` } const ( @@ -129,6 +148,12 @@ type HelmChartStatus struct { // +optional ObservedChartName string `json:"observedChartName,omitempty"` + // ObservedValuesFiles are the observed value files of the last successful + // reconciliation. 
+ // It matches the chart in the last successfully reconciled artifact. + // +optional + ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"` + // Conditions holds the conditions for the HelmChart. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -141,7 +166,7 @@ type HelmChartStatus struct { // Artifact represents the output of the last successful reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -174,7 +199,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { +func (in *HelmChart) GetArtifact() *meta.Artifact { return in.Status.Artifact } @@ -190,11 +215,10 @@ func (in *HelmChart) GetValuesFiles() []string { } // +genclient -// +genclient:Namespaced -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=hc // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` // +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index 87c0b16b8..56cbd928c 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -43,6 +43,7 @@ const ( type HelmRepositorySpec struct { // URL of the Helm repository, a valid URL contains at least a protocol and // host. + // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$" // +required URL string `json:"url"` @@ -50,11 +51,29 @@ type HelmRepositorySpec struct { // for the HelmRepository. 
// For HTTP/S basic auth the secret must contain 'username' and 'password' // fields. - // For TLS the secret must contain a 'certFile' and 'keyFile', and/or - // 'caCert' fields. + // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + // keys is deprecated. Please use `.spec.certSecretRef` instead. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // It takes precedence over the values specified in the Secret referred + // to by `.spec.secretRef`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + // PassCredentials allows the credentials from the SecretRef to be passed // on to a host that does not match the host as defined in URL. // This may be required if the host of the advertised chart URLs in the @@ -64,12 +83,25 @@ type HelmRepositorySpec struct { // +optional PassCredentials bool `json:"passCredentials,omitempty"` - // Interval at which to check the URL for updates. - // +required - Interval metav1.Duration `json:"interval"` + // Interval at which the HelmRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval metav1.Duration `json:"interval,omitempty"` - // Timeout of the index fetch operation, defaults to 60s. - // +kubebuilder:default:="60s" + // Insecure allows connecting to a non-TLS HTTP container registry. + // This field is only taken into account if the .spec.type field is set to 'oci'. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // Timeout is used for the index fetch operation for an HTTPS helm repository, + // and for remote OCI Repository operations like pulling for an OCI helm + // chart by the associated HelmChart. + // Its default value is 60s. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -89,6 +121,14 @@ type HelmRepositorySpec struct { // +kubebuilder:validation:Enum=default;oci // +optional Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` } // HelmRepositoryStatus records the observed state of the HelmRepository. @@ -110,7 +150,7 @@ type HelmRepositoryStatus struct { // Artifact represents the last successful HelmRepository reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -134,21 +174,32 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { // GetRequeueAfter returns the duration after which the source must be // reconciled again. 
func (in HelmRepository) GetRequeueAfter() time.Duration { - return in.Spec.Interval.Duration + if in.Spec.Interval.Duration != 0 { + return in.Spec.Interval.Duration + } + return time.Minute +} + +// GetTimeout returns the timeout duration used for various operations related +// to this HelmRepository. +func (in HelmRepository) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return time.Minute } // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmRepository) GetArtifact() *Artifact { +func (in *HelmRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } // +genclient -// +genclient:Namespaced -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=helmrepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/api/v1beta2/ocirepository_types.go b/api/v1beta2/ocirepository_types.go index 83ff7f3ff..760f0d8f1 100644 --- a/api/v1beta2/ocirepository_types.go +++ b/api/v1beta2/ocirepository_types.go @@ -22,6 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/meta" + + apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -45,6 +47,12 @@ const ( // AzureOCIProvider provides support for OCI authentication using a Azure Service Principal, // Managed Identity or Shared Key. AzureOCIProvider string = "azure" + + // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer. 
+ OCILayerExtract = "extract" + + // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer. + OCILayerCopy = "copy" ) // OCIRepositorySpec defines the desired state of OCIRepository @@ -60,6 +68,11 @@ type OCIRepositorySpec struct { // +optional Reference *OCIRepositoryRef `json:"ref,omitempty"` + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. // When not specified, defaults to 'generic'. // +kubebuilder:validation:Enum=generic;aws;azure;gcp @@ -73,32 +86,53 @@ type OCIRepositorySpec struct { // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate // the image pull if the service account has attached pull secrets. 
For more information: // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account // +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` - // CertSecretRef can be given the name of a secret containing + // CertSecretRef can be given the name of a Secret containing // either or both of // - // - a PEM-encoded client certificate (`certFile`) and private - // key (`keyFile`); - // - a PEM-encoded CA certificate (`caFile`) + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) // - // and whichever are supplied, will be used for connecting to the - // registry. The client cert and key are useful if you are - // authenticating with a certificate; the CA cert is useful if - // you are using a self-signed server certificate. + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // Note: Support for the `caFile`, `certFile` and `keyFile` keys have + // been deprecated. // +optional CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` - // The interval at which to check for image updates. + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" // +required Interval metav1.Duration `json:"interval"` // The timeout for remote OCI Repository operations like pulling, defaults to 60s. // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -108,6 +142,10 @@ type OCIRepositorySpec struct { // +optional Ignore *string `json:"ignore,omitempty"` + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` @@ -125,20 +163,30 @@ type OCIRepositoryRef struct { // +optional SemVer string `json:"semver,omitempty"` + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + // Tag is the image tag to pull, defaults to latest. // +optional Tag string `json:"tag,omitempty"` } -// OCIRepositoryVerification verifies the authenticity of an OCI Artifact -type OCIRepositoryVerification struct { - // Provider specifies the technology used to sign the OCI Artifact. - // +kubebuilder:validation:Enum=cosign - Provider string `json:"provider"` +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` - // SecretRef specifies the Kubernetes Secret containing the - // trusted public keys. 
- SecretRef meta.LocalObjectReference `json:"secretRef"` + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. + // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. + // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` } // OCIRepositoryStatus defines the observed state of OCIRepository @@ -157,7 +205,31 @@ type OCIRepositoryStatus struct { // Artifact represents the output of the last successful OCI Repository sync. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ContentConfigChecksum is a checksum of all the configurations related to + // the content of the source artifact: + // - .spec.ignore + // - .spec.layerSelector + // observed in .status.observedGeneration version of the object. This can + // be used to determine if the content configuration has changed and the + // artifact needs to be rebuilt. + // It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`. + // + // Deprecated: Replaced with explicit fields for observed artifact content + // config in the status. + // +optional + ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -188,16 +260,33 @@ func (in OCIRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the OCIRepository if present in // the status sub-resource. 
-func (in *OCIRepository) GetArtifact() *Artifact { +func (in *OCIRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). +func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + // +genclient -// +genclient:Namespaced -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=ocirepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go index 76e2cc21e..4111c0998 100644 --- a/api/v1beta2/source.go +++ b/api/v1beta2/source.go @@ -33,6 +33,9 @@ const ( // interval. It must be supported by all kinds of the source.toolkit.fluxcd.io // API group. // +// Deprecated: use the Source interface from api/v1 instead. This type will be +// removed in a future release. 
+// // +k8s:deepcopy-gen=false type Source interface { runtime.Object diff --git a/pkg/git/libgit2/managed/const.go b/api/v1beta2/sts_types.go similarity index 55% rename from pkg/git/libgit2/managed/const.go rename to api/v1beta2/sts_types.go index f41035da7..c07c05123 100644 --- a/pkg/git/libgit2/managed/const.go +++ b/api/v1beta2/sts_types.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Flux authors +Copyright 2024 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package managed +package v1beta2 const ( - // URLMaxLength represents the max length for the entire URL - // when cloning Git repositories via HTTP(S). - URLMaxLength = 2048 - - // PathMaxLength represents the max length for the path element - // when cloning Git repositories via SSH. - PathMaxLength = 4096 + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" ) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index fc186d4df..0b874dd7e 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -1,8 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* -Copyright 2022 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -24,6 +23,7 @@ package v1beta2 import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + apiv1 "github.com/fluxcd/source-controller/api/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -115,14 +115,54 @@ func (in *BucketList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(meta.LocalObjectReference) **out = **in } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } out.Interval = in.Interval if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout @@ -163,9 +203,14 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } out.ReconcileRequestStatus = in.ReconcileRequestStatus } @@ -332,20 +377,30 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.IncludedArtifacts != nil { in, out := &in.IncludedArtifacts, &out.IncludedArtifacts - *out = make([]*Artifact, len(*in)) + *out = make([]*meta.Artifact, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } } } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedInclude != nil { + in, out := &in.ObservedInclude, &out.ObservedInclude + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } out.ReconcileRequestStatus = in.ReconcileRequestStatus } @@ -449,6 +504,11 @@ func 
(in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { *out = new(acl.AccessFrom) (*in).DeepCopyInto(*out) } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(apiv1.OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. @@ -464,6 +524,11 @@ func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]v1.Condition, len(*in)) @@ -473,7 +538,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -556,6 +621,11 @@ func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { *out = new(meta.LocalObjectReference) **out = **in } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } out.Interval = in.Interval if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout @@ -591,7 +661,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -622,6 +692,21 @@ func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReferen return out } +// DeepCopyInto is an autogenerated deepcopy function, 
copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { *out = *in @@ -704,16 +789,31 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { *out = new(OCIRepositoryRef) **out = **in } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(meta.LocalObjectReference) **out = **in } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(apiv1.OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } if in.CertSecretRef != nil { in, out := &in.CertSecretRef, &out.CertSecretRef *out = new(meta.LocalObjectReference) **out = **in } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } out.Interval = in.Interval if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout @@ -749,9 +849,19 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = 
**in + } out.ReconcileRequestStatus = in.ReconcileRequestStatus } @@ -764,19 +874,3 @@ func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { - *out = *in - out.SecretRef = in.SecretRef -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification. -func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification { - if in == nil { - return nil - } - out := new(OCIRepositoryVerification) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index d8fc0f533..f578c8da0 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: buckets.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -20,94 +18,122 @@ spec: - jsonPath: .spec.endpoint name: Endpoint type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: Bucket is the Schema for the buckets API + description: Bucket is the Schema for the buckets API. 
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BucketSpec defines the desired state of an S3 compatible - bucket + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. + bucketName: + description: BucketName is the name of the object storage bucket. 
+ type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. - items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - type: array + name: + description: Name of the referent. + type: string required: - - namespaceSelectors + - name type: object - bucketName: - description: The bucket name. - type: string endpoint: - description: The bucket endpoint address. + description: Endpoint is the object storage address the BucketName + is located at. type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. 
+ description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string insecure: - description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. + description: Insecure allows connecting to a non-TLS HTTP Endpoint. type: boolean interval: - description: The interval at which to check for bucket updates. + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. type: string provider: default: generic - description: The S3 compatible storage provider name, default ('generic'). + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. enum: - generic - aws - gcp + - azure type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object region: - description: The bucket region. + description: Region of the Endpoint where the BucketName is located + in. type: string secretRef: - description: The name of the secret containing authentication credentials + description: |- + SecretRef specifies the Secret containing authentication credentials for the Bucket. properties: name: @@ -116,92 +142,191 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the bucket. 
This field is only supported for the 'gcp' and 'aws' providers. + For more information about workload identity: + https://fluxcd.io/flux/components/source/buckets/#workload-identity + type: string + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. 
+ description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. type: boolean timeout: default: 60s - description: The timeout for download operations, defaults to 60s. + description: Timeout for fetch operations, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string required: - bucketName - endpoint - interval type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' + - message: ServiceAccountName is not supported for the 'generic' Bucket + provider + rule: self.provider != 'generic' || !has(self.serviceAccountName) + - message: cannot set both .spec.secretRef and .spec.serviceAccountName + rule: '!has(self.secretRef) || !has(self.serviceAccountName)' status: default: observedGeneration: -1 - description: BucketStatus defines the observed state of a bucket + description: BucketStatus records the observed state of a Bucket. properties: artifact: - description: Artifact represents the output of the last successful - Bucket sync. + description: Artifact represents the last successful Bucket reconciliation. 
properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. 
--- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -216,10 +341,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -232,22 +353,31 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. 
+ description: ObservedGeneration is the last observed generation of + the Bucket object. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string url: - description: URL is the download link for the artifact output of the - last Bucket sync. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -263,49 +393,57 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: Bucket is the Schema for the buckets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BucketSpec specifies the required configuration to produce - an Artifact for an object storage bucket. + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -315,39 +453,84 @@ spec: bucketName: description: BucketName is the name of the object storage bucket. type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object endpoint: description: Endpoint is the object storage address the BucketName is located at. type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. 
type: string insecure: description: Insecure allows connecting to a non-TLS HTTP Endpoint. type: boolean interval: - description: Interval at which to check the Endpoint for updates. + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. type: string provider: default: generic - description: Provider of the object storage bucket. Defaults to 'generic', - which expects an S3 (API) compatible object storage. + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. enum: - generic - aws - gcp - azure type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object region: description: Region of the Endpoint where the BucketName is located in. type: string secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the Bucket. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the Bucket. properties: name: description: Name of the referent. @@ -355,19 +538,96 @@ spec: required: - name type: object + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. 
+ properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this Bucket. + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. type: boolean timeout: default: 60s description: Timeout for fetch operations, defaults to 60s. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string required: - bucketName - endpoint - interval type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' status: default: observedGeneration: -1 @@ -376,12 +636,14 @@ spec: artifact: description: Artifact represents the last successful Bucket reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact file. + digest: + description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -390,70 +652,65 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. 
+ description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -468,10 +725,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -484,29 +737,30 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation of the Bucket object. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. 
+ type: string url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml new file mode 100644 index 000000000..23cdf63c3 --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externalartifacts.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: ExternalArtifact + listKind: ExternalArtifactList + plural: externalartifacts + singular: externalartifact + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .spec.sourceRef.name + name: Source + type: string + name: v1 + schema: + openAPIV3Schema: + description: ExternalArtifact is the Schema for the external artifacts API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExternalArtifactSpec defines the desired state of ExternalArtifact + properties: + sourceRef: + description: |- + SourceRef points to the Kubernetes custom resource for + which the artifact is generated. + properties: + apiVersion: + description: API version of the referent, if not specified the + Kubernetes preferred version will be used. + type: string + kind: + description: Kind of the referent. + type: string + name: + description: Name of the referent. + type: string + namespace: + description: Namespace of the referent, when not specified it + acts as LocalObjectReference. + type: string + required: + - kind + - name + type: object + type: object + status: + description: ExternalArtifactStatus defines the observed state of ExternalArtifact + properties: + artifact: + description: Artifact represents the output of an ExternalArtifact + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. 
It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the ExternalArtifact. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index b260fb694..10663e473 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: gitrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -22,88 +20,66 @@ spec: - jsonPath: .spec.url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: GitRepository is the 
Schema for the gitrepositories API + description: GitRepository is the Schema for the gitrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GitRepositorySpec defines the desired state of a Git repository. + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. 
- items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - gitImplementation: - default: go-git - description: Determines which git client library to use. Defaults - to go-git, valid values are ('go-git', 'libgit2'). - enum: - - go-git - - libgit2 - type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. items: - description: GitRepositoryInclude defines a source with a from and - to path. + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: The path to copy contents from, defaults to the - root directory. 
+ description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. type: string repository: - description: Reference to a GitRepository to include. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -112,45 +88,85 @@ spec: - name type: object toPath: - description: The path to copy contents to, defaults to the name - of the source ref. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. type: string required: - repository type: object type: array interval: - description: The interval at which to check for repository updates. + description: |- + Interval at which the GitRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + provider: + description: |- + Provider used for authentication, can be 'azure', 'github', 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - azure + - github type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Git server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object recurseSubmodules: - description: When enabled, after the clone is created, initializes - all submodules within, using their default settings. This option - is available only when using the 'go-git' GitImplementation. + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. type: boolean ref: - description: The Git reference to checkout and monitor for changes, - defaults to master branch. 
+ description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. properties: branch: - description: The Git branch to checkout, defaults to master. + description: Branch to check out, defaults to 'master' if no other + field is defined. type: string commit: - description: The Git commit SHA to checkout, if specified Tag - filters will be ignored. + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. + type: string + name: + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string semver: - description: The Git tag semver expression, takes precedence over - Tag. + description: SemVer tag expression to check out, takes precedence + over Tag. type: string tag: - description: The Git tag to checkout, takes precedence over Branch. + description: Tag to check out, takes precedence over Branch. type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS - repositories the secret must contain username and password fields. - For SSH repositories the secret must contain identity and known_hosts - fields. + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. + For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. properties: name: description: Name of the referent. 
@@ -158,32 +174,58 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to + authenticate to the GitRepository. This field is only supported for 'azure' provider. + type: string + sparseCheckout: + description: |- + SparseCheckout specifies a list of directories to checkout when cloning + the repository. If specified, only these directories are included in the + Artifact produced for this GitRepository. + items: + type: string + type: array suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. type: boolean timeout: default: 60s - description: The timeout for remote Git operations like cloning, defaults - to 60s. + description: Timeout for Git operations like cloning, defaults to + 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string url: - description: The repository URL, can be a HTTP/S or SSH address. + description: URL specifies the Git repository URL, it can be an HTTP/S + or SSH address. pattern: ^(http|https|ssh)://.*$ type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points - to. + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). properties: mode: - description: Mode describes what git object should be verified, - currently ('head'). + default: HEAD + description: |- + Mode specifies which Git object(s) should be verified. + + The variants "head" and "HEAD" both imply the same thing, i.e. verify + the commit that the HEAD of the Git repository points to. The variant + "head" solely exists to ensure backwards compatibility. enum: - head + - HEAD + - Tag + - TagAndHEAD type: string secretRef: - description: The secret name containing the public keys of all - trusted Git authors. 
+ description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. properties: name: description: Name of the referent. @@ -192,85 +234,100 @@ spec: - name type: object required: - - mode + - secretRef type: object required: - interval - url type: object + x-kubernetes-validations: + - message: serviceAccountName can only be set when provider is 'azure' + rule: '!has(self.serviceAccountName) || (has(self.provider) && self.provider + == ''azure'')' status: default: observedGeneration: -1 - description: GitRepositoryStatus defines the observed state of a Git repository. + description: GitRepositoryStatus records the observed state of a Git repository. properties: artifact: - description: Artifact represents the output of the last successful - repository sync. + description: Artifact represents the last successful GitRepository + reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. 
It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -285,10 +342,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -301,52 +354,130 @@ spec: type: object type: array includedArtifacts: - description: IncludedArtifacts represents the included artifacts from - the last successful repository sync. + description: |- + IncludedArtifacts contains a list of the last successfully included + Artifacts as instructed by GitRepositorySpec.Include. items: - description: Artifact represents the output of a source synchronisation. + description: Artifact represents the output of a Source reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of + ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI + annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. 
It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. format: int64 type: integer - url: - description: URL is the download link for the artifact output of the - last repository sync. + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedInclude: + description: |- + ObservedInclude is the observed list of GitRepository resources used to + produce the current Artifact. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. 
+ properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array + observedRecurseSubmodules: + description: |- + ObservedRecurseSubmodules is the observed resource submodules + configuration used to produce the current Artifact. + type: boolean + observedSparseCheckout: + description: |- + ObservedSparseCheckout is the observed list of directories used to + produce the current Artifact. + items: + type: string + type: array + sourceVerificationMode: + description: |- + SourceVerificationMode is the last used verification mode indicating + which Git object(s) have been verified. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -362,49 +493,57 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 GitRepository is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: GitRepository is the Schema for the gitrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GitRepositorySpec specifies the required configuration to - produce an Artifact for a Git repository. + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. 
items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -413,34 +552,39 @@ spec: type: object gitImplementation: default: go-git - description: GitImplementation specifies which Git client library - implementation to use. Defaults to 'go-git', valid values are ('go-git', - 'libgit2'). + description: |- + GitImplementation specifies which Git client library implementation to + use. Defaults to 'go-git', valid values are ('go-git', 'libgit2'). + Deprecated: gitImplementation is deprecated now that 'go-git' is the + only supported implementation. enum: - go-git - libgit2 type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). 
If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string include: - description: Include specifies a list of GitRepository resources which - Artifacts should be included in the Artifact produced for this GitRepository. + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. items: - description: GitRepositoryInclude specifies a local reference to - a GitRepository which Artifact (sub-)contents must be included, - and where they should be placed. + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: FromPath specifies the path to copy contents from, - defaults to the root of the Artifact. + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. type: string repository: - description: GitRepositoryRef specifies the GitRepository which - Artifact contents must be included. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -449,8 +593,9 @@ spec: - name type: object toPath: - description: ToPath specifies the path to copy contents to, - defaults to the name of the GitRepositoryRef. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. type: string required: - repository @@ -458,28 +603,35 @@ spec: type: array interval: description: Interval at which to check the GitRepository for updates. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string recurseSubmodules: - description: RecurseSubmodules enables the initialization of all submodules - within the GitRepository as cloned from the URL, using their default - settings. 
This option is available only when using the 'go-git' - GitImplementation. + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. type: boolean ref: - description: Reference specifies the Git reference to resolve and - monitor for changes, defaults to the 'master' branch. + description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. properties: branch: - description: "Branch to check out, defaults to 'master' if no - other field is defined. \n When GitRepositorySpec.GitImplementation - is set to 'go-git', a shallow clone of the specified branch - is performed." + description: Branch to check out, defaults to 'master' if no other + field is defined. type: string commit: - description: "Commit SHA to check out, takes precedence over all - reference fields. \n When GitRepositorySpec.GitImplementation - is set to 'go-git', this can be combined with Branch to shallow - clone the branch, in which the commit is expected to exist." + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. + type: string + name: + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string semver: description: SemVer tag expression to check out, takes precedence @@ -490,10 +642,13 @@ spec: type: string type: object secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the GitRepository. For HTTPS repositories the Secret - must contain 'username' and 'password' fields. 
For SSH repositories - the Secret must contain 'identity' and 'known_hosts' fields. + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. + For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. properties: name: description: Name of the referent. @@ -502,13 +657,15 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this GitRepository. + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. type: boolean timeout: default: 60s description: Timeout for Git operations like cloning, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string url: description: URL specifies the Git repository URL, it can be an HTTP/S @@ -516,8 +673,9 @@ spec: pattern: ^(http|https|ssh)://.*$ type: string verify: - description: Verification specifies the configuration to verify the - Git commit signature(s). + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). properties: mode: description: Mode specifies what Git object should be verified, @@ -526,8 +684,9 @@ spec: - head type: string secretRef: - description: SecretRef specifies the Secret containing the public - keys of trusted Git authors. + description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. properties: name: description: Name of the referent. @@ -537,6 +696,7 @@ spec: type: object required: - mode + - secretRef type: object required: - interval @@ -551,12 +711,14 @@ spec: description: Artifact represents the last successful GitRepository reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact file. 
+ digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -565,70 +727,65 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. 
type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. 
For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -643,10 +800,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -659,27 +812,36 @@ spec: type: object type: array contentConfigChecksum: - description: 'ContentConfigChecksum is a checksum of all the configurations - related to the content of the source artifact: - .spec.ignore - - .spec.recurseSubmodules - .spec.included and the checksum of the - included artifacts observed in .status.observedGeneration version - of the object. This can be used to determine if the content of the - included repository has changed. It has the format of `:`, - for example: `sha256:`.' + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.recurseSubmodules + - .spec.included and the checksum of the included artifacts + observed in .status.observedGeneration version of the object. This can + be used to determine if the content of the included repository has + changed. + It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. type: string includedArtifacts: - description: IncludedArtifacts contains a list of the last successfully - included Artifacts as instructed by GitRepositorySpec.Include. + description: |- + IncludedArtifacts contains a list of the last successfully included + Artifacts as instructed by GitRepositorySpec.Include. items: description: Artifact represents the output of a Source reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact - file. + digest: + description: Digest is the digest of the file in the form of + ':'. 
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -689,55 +851,99 @@ spec: annotations. type: object path: - description: Path is the relative file path of the Artifact. - It can be used to locate the file in the root of the Artifact - storage on the local file system of the controller managing - the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. 
+ description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the GitRepository object. + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedInclude: + description: |- + ObservedInclude is the observed list of GitRepository resources used to + produce the current Artifact. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. + properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array + observedRecurseSubmodules: + description: |- + ObservedRecurseSubmodules is the observed resource submodules + configuration used to produce the current Artifact. + type: boolean url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise GitRepositoryStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise + GitRepositoryStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 6b15e7bfb..0e57c72a5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: helmcharts.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -31,88 +29,79 @@ spec: - jsonPath: .spec.sourceRef.name name: Source Name type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: HelmChart is the Schema for the helmcharts API + description: HelmChart is the Schema for the helmcharts API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmChartSpec defines the desired state of a Helm chart. + description: HelmChartSpec specifies the desired state of a Helm chart. properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. - items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - type: array - required: - - namespaceSelectors - type: object chart: - description: The name or path the Helm chart is available at in the + description: |- + Chart is the name or path the Helm chart is available at in the SourceRef. type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean interval: - description: The interval at which to check the Source for updates. + description: |- + Interval at which the HelmChart SourceRef is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string reconcileStrategy: default: ChartVersion - description: Determines what enables the creation of a new artifact. - Valid values are ('ChartVersion', 'Revision'). See the documentation - of the values for an explanation on their behavior. Defaults to - ChartVersion when omitted. + description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). + See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. enum: - ChartVersion - Revision type: string sourceRef: - description: The reference to the Source the chart is available at. + description: SourceRef is the reference to the Source the chart is + available at. properties: apiVersion: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', - 'GitRepository', 'Bucket'). + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). 
enum: - HelmRepository - GitRepository @@ -126,28 +115,83 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. type: boolean - valuesFile: - description: Alternative values file to use as the default chart values, - expected to be a relative path in the SourceRef. Deprecated in favor - of ValuesFiles, for backwards compatibility the file defined here - is merged before the ValuesFiles items. Ignored when omitted. - type: string valuesFiles: - description: Alternative list of values files to use as the chart - values (values.yaml is not included by default), expected to be - a relative path in the SourceRef. Values files are merged in the - order of this list with the last file overriding the first. Ignored - when omitted. + description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. + Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. items: type: string type: array + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. 
+ items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object version: default: '*' - description: The chart version semver expression, ignored for charts - from GitRepository and Bucket sources. Defaults to latest when omitted. + description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -157,76 +201,87 @@ spec: status: default: observedGeneration: -1 - description: HelmChartStatus defines the observed state of the HelmChart. + description: HelmChartStatus records the observed state of the HelmChart. properties: artifact: description: Artifact represents the output of the last successful - chart sync. + reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -241,10 +296,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -257,21 +308,45 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedChartName: + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. 
+ description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. format: int64 type: integer + observedSourceArtifactRevision: + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the HelmChartSpec.SourceRef. + type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array url: - description: URL is the download link for the last chart pulled. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -296,20 +371,27 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 HelmChart is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: HelmChart is the Schema for the helmcharts API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -317,27 +399,27 @@ spec: description: HelmChartSpec specifies the desired state of a Helm chart. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. 
+ An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -345,19 +427,29 @@ spec: - namespaceSelectors type: object chart: - description: Chart is the name or path the Helm chart is available - at in the SourceRef. + description: |- + Chart is the name or path the Helm chart is available at in the + SourceRef. type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean interval: - description: Interval is the interval at which to check the Source - for updates. + description: |- + Interval at which the HelmChart SourceRef is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string reconcileStrategy: default: ChartVersion - description: ReconcileStrategy determines what enables the creation - of a new artifact. Valid values are ('ChartVersion', 'Revision'). - See the documentation of the values for an explanation on their - behavior. Defaults to ChartVersion when omitted. + description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). 
+ See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. enum: - ChartVersion - Revision @@ -370,8 +462,9 @@ spec: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', - 'GitRepository', 'Bucket'). + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). enum: - HelmRepository - GitRepository @@ -385,30 +478,90 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. type: boolean valuesFile: - description: ValuesFile is an alternative values file to use as the - default chart values, expected to be a relative path in the SourceRef. - Deprecated in favor of ValuesFiles, for backwards compatibility - the file specified here is merged before the ValuesFiles items. - Ignored when omitted. + description: |- + ValuesFile is an alternative values file to use as the default chart + values, expected to be a relative path in the SourceRef. Deprecated in + favor of ValuesFiles, for backwards compatibility the file specified here + is merged before the ValuesFiles items. Ignored when omitted. type: string valuesFiles: - description: ValuesFiles is an alternative list of values files to - use as the chart values (values.yaml is not included by default), - expected to be a relative path in the SourceRef. Values files are - merged in the order of this list with the last file overriding the - first. Ignored when omitted. + description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. + Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. 
items: type: string type: array + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object version: default: '*' - description: Version is the chart version semver expression, ignored - for charts from GitRepository and Bucket sources. Defaults to latest - when omitted. 
+ description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -424,12 +577,14 @@ spec: description: Artifact represents the output of the last successful reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact file. + digest: + description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -438,70 +593,65 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g.
by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. 
+ description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -516,10 +666,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -532,37 +678,44 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedChartName: - description: ObservedChartName is the last observed chart name as - specified by the resolved chart reference. + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the HelmChart object. + description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. format: int64 type: integer observedSourceArtifactRevision: - description: ObservedSourceArtifactRevision is the last observed Artifact.Revision + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision of the HelmChartSpec.SourceRef. type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index c19552fdd..750a36500 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: helmrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -22,78 +20,135 @@ spec: - jsonPath: .spec.url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: HelmRepository is the Schema for the helmrepositories API + description: HelmRepository is the Schema for the helmrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmRepositorySpec defines the reference to a Helm repository. + description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. 
+ An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array required: - namespaceSelectors type: object + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + insecure: + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. + type: boolean interval: - description: The interval at which to check the upstream for updates. + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef - to be passed on to a host that does not match the host as defined - in URL. This may be required if the host of the advertised chart - URLs in the index differ from the defined URL. Enabling this should - be done with caution, as it can potentially result in credentials - getting stolen in a MITM-attack. + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. + This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. + Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. type: boolean + provider: + default: generic + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string secretRef: - description: The name of the secret containing authentication credentials - for the Helm repository. For HTTP/S basic auth the secret must contain - username and password fields. For TLS the secret must contain a - certFile and keyFile, and/or caCert fields. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. properties: name: description: Name of the referent. @@ -102,94 +157,119 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. 
+ description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. type: boolean timeout: - default: 60s - description: The timeout of index downloading, defaults to 60s. + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. + Its default value is 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + type: + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". + enum: + - default + - oci type: string url: - description: The Helm repository URL, a valid URL contains at least - a protocol and host. + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. + pattern: ^(http|https|oci)://.*$ type: string required: - - interval - url type: object status: default: observedGeneration: -1 - description: HelmRepositoryStatus defines the observed state of the HelmRepository. + description: HelmRepositoryStatus records the observed state of the HelmRepository. properties: artifact: - description: Artifact represents the output of the last successful - repository sync. + description: Artifact represents the last successful HelmRepository + reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. 
+ type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. 
properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -204,10 +284,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -220,21 +296,27 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. format: int64 type: integer url: - description: URL is the download link for the last index fetched. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -250,71 +332,128 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: HelmRepository is the Schema for the helmrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmRepositorySpec specifies the required configuration to - produce an Artifact for a Helm repository index YAML. 
+ description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object type: array required: - namespaceSelectors type: object + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + insecure: + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. + type: boolean interval: - description: Interval at which to check the URL for updates. + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef - to be passed on to a host that does not match the host as defined - in URL. This may be required if the host of the advertised chart - URLs in the index differ from the defined URL. Enabling this should - be done with caution, as it can potentially result in credentials - getting stolen in a MITM-attack. + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. 
+ This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. + Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. type: boolean + provider: + default: generic + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the HelmRepository. For HTTP/S basic auth the secret - must contain 'username' and 'password' fields. For TLS the secret - must contain a 'certFile' and 'keyFile', and/or 'caCert' fields. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. properties: name: description: Name of the referent. @@ -323,26 +462,33 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this HelmRepository. + description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. type: boolean timeout: - default: 60s - description: Timeout of the index fetch operation, defaults to 60s. + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. + Its default value is 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string type: - description: Type of the HelmRepository. 
When this field is set to "oci", - the URL field value must be prefixed with "oci://". + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". enum: - default - oci type: string url: - description: URL of the Helm repository, a valid URL contains at least - a protocol and host. + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. + pattern: ^(http|https|oci)://.*$ type: string required: - - interval - url type: object status: @@ -354,12 +500,14 @@ spec: description: Artifact represents the last successful HelmRepository reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact file. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -368,70 +516,65 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. 
It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -446,10 +589,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -462,29 +601,26 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the HelmRepository object. + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. format: int64 type: integer url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise HelmRepositoryStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml index 5e214ccd8..05b7b96ab 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: ocirepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -31,20 +29,25 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta2 + name: v1 schema: openAPIV3Schema: description: OCIRepository is the Schema for the ocirepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -52,13 +55,19 @@ spec: description: OCIRepositorySpec defines the desired state of OCIRepository properties: certSecretRef: - description: "CertSecretRef can be given the name of a secret containing - either or both of \n - a PEM-encoded client certificate (`certFile`) - and private key (`keyFile`); - a PEM-encoded CA certificate (`caFile`) - \n and whichever are supplied, will be used for connecting to the - \ registry. The client cert and key are useful if you are authenticating - with a certificate; the CA cert is useful if you are using a self-signed - server certificate." + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. properties: name: description: Name of the referent. @@ -67,44 +76,94 @@ spec: - name type: object ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. 
+ description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean interval: - description: The interval at which to check for image updates. + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object provider: default: generic - description: The provider used for authentication, can be 'aws', 'azure', - 'gcp' or 'generic'. When not specified, defaults to 'generic'. + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. enum: - generic - aws - azure - gcp type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. 
+ properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object ref: - description: The OCI reference to pull and monitor for changes, defaults - to the latest tag. + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. properties: digest: - description: Digest is the image digest to pull, takes precedence - over SemVer. The value should be in the format 'sha256:'. + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. type: string semver: - description: SemVer is the range of tags to pull selecting the - latest within the range, takes precedence over Tag. + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. type: string tag: description: Tag is the image tag to pull, defaults to latest. type: string type: object secretRef: - description: SecretRef contains the secret name containing the registry - login credentials to resolve image metadata. The secret must be - of type kubernetes.io/dockerconfigjson. + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. properties: name: description: Name of the referent. @@ -113,9 +172,10 @@ spec: - name type: object serviceAccountName: - description: 'ServiceAccountName is the name of the Kubernetes ServiceAccount - used to authenticate the image pull if the service account has attached - pull secrets. 
For more information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account' + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account type: string suspend: description: This flag tells the controller to suspend the reconciliation @@ -125,12 +185,70 @@ spec: default: 60s description: The timeout for remote OCI Repository operations like pulling, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string url: - description: URL is a reference to an OCI artifact repository hosted + description: |- + URL is a reference to an OCI artifact repository hosted on a remote container registry. pattern: ^oci://.*$ type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. 
+ type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object required: - interval - url @@ -144,12 +262,14 @@ spec: description: Artifact represents the output of the last successful OCI Repository sync. properties: - checksum: - description: Checksum is the SHA256 checksum of the Artifact file. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -158,70 +278,65 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. 
type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the OCIRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. 
This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. 
+ The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -236,10 +351,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -252,14 +363,42 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. 
+ enum: + - extract + - copy + type: string + type: object url: description: URL is the download link for the artifact output of the last OCI Repository sync. @@ -270,9 +409,415 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1 + name: v1beta2 + schema: + openAPIV3Schema: + description: OCIRepository is the Schema for the ocirepositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OCIRepositorySpec defines the desired state of OCIRepository + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. 
The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + Note: Support for the `caFile`, `certFile` and `keyFile` keys have + been deprecated. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean + interval: + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + provider: + default: generic + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. 
+ enum: + - generic + - aws + - azure + - gcp + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ref: + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. + properties: + digest: + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. + type: string + semver: + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. + type: string + tag: + description: Tag is the image tag to pull, defaults to latest. + type: string + type: object + secretRef: + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + type: string + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote OCI Repository operations like + pulling, defaults to 60s. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: |- + URL is a reference to an OCI artifact repository hosted + on a remote container registry. + pattern: ^oci://.*$ + type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + required: + - provider + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: OCIRepositoryStatus defines the observed state of OCIRepository + properties: + artifact: + description: Artifact represents the output of the last successful + OCI Repository sync. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the OCIRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + contentConfigChecksum: + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.layerSelector + observed in .status.observedGeneration version of the object. This can + be used to determine if the content configuration has changed and the + artifact needs to be rebuilt. + It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. + type: string + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. 
+ enum: + - extract + - copy + type: string + type: object + url: + description: URL is the download link for the artifact output of the + last OCI Repository sync. + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c00716353..2a09dbfd5 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,4 +6,5 @@ resources: - bases/source.toolkit.fluxcd.io_helmcharts.yaml - bases/source.toolkit.fluxcd.io_buckets.yaml - bases/source.toolkit.fluxcd.io_ocirepositories.yaml +- bases/source.toolkit.fluxcd.io_externalartifacts.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/deployment.yaml b/config/manager/deployment.yaml index 3ea2c59ff..e354b00e3 100644 --- a/config/manager/deployment.yaml +++ b/config/manager/deployment.yaml @@ -51,6 +51,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: TUF_ROOT # store the Fulcio root CA file in tmp + value: "/tmp/.sigstore" args: - --watch-all-namespaces - --log-level=info diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 7a7a6f472..0118ce85b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,4 +6,4 @@ resources: images: - name: fluxcd/source-controller newName: fluxcd/source-controller - newTag: v0.26.1 + newTag: v1.7.0 diff --git a/config/rbac/externalartifact_editor_role.yaml b/config/rbac/externalartifact_editor_role.yaml new file mode 100644 index 000000000..ded6c1d93 --- /dev/null +++ b/config/rbac/externalartifact_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit externalartifacts. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifact-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/externalartifact_viewer_role.yaml b/config/rbac/externalartifact_viewer_role.yaml new file mode 100644 index 000000000..d0c1d507f --- /dev/null +++ b/config/rbac/externalartifact_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view externalartifacts. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifacts-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a048672d6..d2cd9e7cb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,9 +1,7 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -17,133 +15,24 @@ rules: - "" resources: - secrets + - serviceaccounts verbs: - get - list - watch - apiGroups: - - source.toolkit.fluxcd.io + - "" resources: - - buckets + - serviceaccounts/token verbs: - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/status - verbs: - - get - - patch - - update - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets - gitrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- 
apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmcharts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - ocirepositories verbs: - create @@ -156,6 +45,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/finalizers + - gitrepositories/finalizers + - helmcharts/finalizers + - helmrepositories/finalizers - ocirepositories/finalizers verbs: - create @@ -166,6 +59,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/status + - gitrepositories/status + - helmcharts/status + - helmrepositories/status - ocirepositories/status verbs: - get diff --git a/config/samples/source_v1beta2_bucket.yaml b/config/samples/source_v1_bucket.yaml similarity index 81% rename from config/samples/source_v1beta2_bucket.yaml rename to config/samples/source_v1_bucket.yaml index cbc211aa6..f09cbe213 100644 --- a/config/samples/source_v1beta2_bucket.yaml +++ b/config/samples/source_v1_bucket.yaml @@ -1,4 +1,4 @@ 
-apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: bucket-sample diff --git a/config/samples/source_v1beta2_gitrepository.yaml b/config/samples/source_v1_gitrepository.yaml similarity index 77% rename from config/samples/source_v1beta2_gitrepository.yaml rename to config/samples/source_v1_gitrepository.yaml index f22674600..27fad9a25 100644 --- a/config/samples/source_v1beta2_gitrepository.yaml +++ b/config/samples/source_v1_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: gitrepository-sample diff --git a/config/samples/source_v1beta2_helmchart_gitrepository.yaml b/config/samples/source_v1_helmchart_gitrepository.yaml similarity index 78% rename from config/samples/source_v1beta2_helmchart_gitrepository.yaml rename to config/samples/source_v1_helmchart_gitrepository.yaml index 731d8d21b..680e7b184 100644 --- a/config/samples/source_v1beta2_helmchart_gitrepository.yaml +++ b/config/samples/source_v1_helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-git-sample diff --git a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml b/config/samples/source_v1_helmchart_helmrepository-oci.yaml similarity index 82% rename from config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml rename to config/samples/source_v1_helmchart_helmrepository-oci.yaml index d2cdc15c6..d9dd3279d 100644 --- a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml +++ b/config/samples/source_v1_helmchart_helmrepository-oci.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-sample-oci diff --git a/config/samples/source_v1beta2_helmchart_helmrepository.yaml 
b/config/samples/source_v1_helmchart_helmrepository.yaml similarity index 63% rename from config/samples/source_v1beta2_helmchart_helmrepository.yaml rename to config/samples/source_v1_helmchart_helmrepository.yaml index a6bd7c207..d1b43fe3e 100644 --- a/config/samples/source_v1beta2_helmchart_helmrepository.yaml +++ b/config/samples/source_v1_helmchart_helmrepository.yaml @@ -1,11 +1,12 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-sample spec: chart: podinfo - version: '>=2.0.0 <3.0.0' + version: '6.x' sourceRef: kind: HelmRepository name: helmrepository-sample interval: 1m + ignoreMissingValuesFiles: true diff --git a/config/samples/source_v1beta2_helmrepository-oci.yaml b/config/samples/source_v1_helmrepository-oci.yaml similarity index 72% rename from config/samples/source_v1beta2_helmrepository-oci.yaml rename to config/samples/source_v1_helmrepository-oci.yaml index bc487c990..458dc73c2 100644 --- a/config/samples/source_v1beta2_helmrepository-oci.yaml +++ b/config/samples/source_v1_helmrepository-oci.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: helmrepository-sample-oci diff --git a/config/samples/source_v1beta2_helmrepository.yaml b/config/samples/source_v1_helmrepository.yaml similarity index 73% rename from config/samples/source_v1beta2_helmrepository.yaml rename to config/samples/source_v1_helmrepository.yaml index 4a2c7ab36..b7049cc0a 100644 --- a/config/samples/source_v1beta2_helmrepository.yaml +++ b/config/samples/source_v1_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: helmrepository-sample diff --git a/config/samples/source_v1beta2_ocirepository.yaml b/config/samples/source_v1_ocirepository.yaml similarity index 77% rename from 
config/samples/source_v1beta2_ocirepository.yaml rename to config/samples/source_v1_ocirepository.yaml index e06241b97..69fb19e2a 100644 --- a/config/samples/source_v1beta2_ocirepository.yaml +++ b/config/samples/source_v1_ocirepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: ocirepository-sample diff --git a/config/testdata/bucket/source.yaml b/config/testdata/bucket/source.yaml index 459e7400a..bd3097ee2 100644 --- a/config/testdata/bucket/source.yaml +++ b/config/testdata/bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: podinfo diff --git a/config/testdata/git/large-repo.yaml b/config/testdata/git/large-repo.yaml index 139b44415..ad3defd68 100644 --- a/config/testdata/git/large-repo.yaml +++ b/config/testdata/git/large-repo.yaml @@ -1,29 +1,10 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: - name: large-repo-go-git + name: large-repo spec: - gitImplementation: go-git interval: 10m timeout: 2m - url: https://github.com/hashgraph/hedera-mirror-node.git + url: https://github.com/nodejs/node.git ref: branch: main - ignore: | - /* - !/charts ---- -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: large-repo-libgit2 -spec: - gitImplementation: libgit2 - interval: 10m - timeout: 2m - url: https://github.com/hashgraph/hedera-mirror-node.git - ref: - branch: main - ignore: | - /* - !/charts diff --git a/config/testdata/helmchart-from-bucket/source.yaml b/config/testdata/helmchart-from-bucket/source.yaml index 0609cf541..814305d13 100644 --- a/config/testdata/helmchart-from-bucket/source.yaml +++ b/config/testdata/helmchart-from-bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 
+apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: charts @@ -13,7 +13,7 @@ spec: secretRef: name: minio-credentials --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-bucket diff --git a/config/testdata/helmchart-from-oci/notation.yaml b/config/testdata/helmchart-from-oci/notation.yaml new file mode 100644 index 000000000..6434479ea --- /dev/null +++ b/config/testdata/helmchart-from-oci/notation.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo-notation +spec: + url: oci://ghcr.io/stefanprodan/charts + type: "oci" + interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo-notation +spec: + chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo-notation + version: '6.6.0' + interval: 1m + verify: + provider: notation + secretRef: + name: notation-config diff --git a/config/testdata/helmchart-from-oci/source.yaml b/config/testdata/helmchart-from-oci/source.yaml index 9d9945ff6..b2786531e 100644 --- a/config/testdata/helmchart-from-oci/source.yaml +++ b/config/testdata/helmchart-from-oci/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo @@ -8,7 +8,7 @@ spec: type: "oci" interval: 1m --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -19,3 +19,17 @@ spec: name: podinfo version: '6.1.*' interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo-keyless +spec: + chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo + version: '6.2.1' + interval: 1m + verify: + provider: cosign diff --git a/config/testdata/helmchart-valuesfile/gitrepository.yaml 
b/config/testdata/helmchart-valuesfile/gitrepository.yaml index b620c8560..279979e93 100644 --- a/config/testdata/helmchart-valuesfile/gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: podinfo diff --git a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml index 4483f0ca8..3c26b3eb5 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-git @@ -8,6 +8,5 @@ spec: kind: GitRepository name: podinfo chart: charts/podinfo - valuesFile: charts/podinfo/values.yaml valuesFiles: - charts/podinfo/values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml index fdf34f6bf..0b004eb7a 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -8,6 +8,5 @@ spec: kind: HelmRepository name: podinfo chart: podinfo - valuesFile: values.yaml valuesFiles: - values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmrepository.yaml index ab568384c..f0c178695 100644 --- a/config/testdata/helmchart-valuesfile/helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 
kind: HelmRepository metadata: name: podinfo diff --git a/config/testdata/ocirepository/signed-with-key.yaml b/config/testdata/ocirepository/signed-with-key.yaml new file mode 100644 index 000000000..0a3a652ee --- /dev/null +++ b/config/testdata/ocirepository/signed-with-key.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-key +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/podinfo-deploy + ref: + semver: "6.2.x" + verify: + provider: cosign + secretRef: + name: cosign-key diff --git a/config/testdata/ocirepository/signed-with-keyless.yaml b/config/testdata/ocirepository/signed-with-keyless.yaml new file mode 100644 index 000000000..ff46ed30d --- /dev/null +++ b/config/testdata/ocirepository/signed-with-keyless.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-keyless +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + semver: "6.2.x" + verify: + provider: cosign diff --git a/config/testdata/ocirepository/signed-with-notation.yaml b/config/testdata/ocirepository/signed-with-notation.yaml new file mode 100644 index 000000000..55820f6d4 --- /dev/null +++ b/config/testdata/ocirepository/signed-with-notation.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-notation +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/podinfo-deploy + ref: + semver: "6.6.x" + verify: + provider: notation + secretRef: + name: notation-config diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go deleted file mode 100644 index 37cc33d91..000000000 --- a/controllers/bucket_controller_test.go +++ /dev/null @@ -1,1282 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except 
in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/darkowlzz/controller-check/status" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" - s3mock "github.com/fluxcd/source-controller/internal/mock/s3" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" -) - -// Environment variable to set the GCP Storage host for the GCP client. 
-const EnvGcpStorageHost = "STORAGE_EMULATOR_HOST" - -func TestBucketReconciler_Reconcile(t *testing.T) { - g := NewWithT(t) - - s3Server := s3mock.NewServer("test-bucket") - s3Server.Objects = []*s3mock.Object{ - { - Key: "test.yaml", - Content: []byte("test"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - } - s3Server.Start() - defer s3Server.Stop() - - g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) - u, err := url.Parse(s3Server.HTTPAddress()) - g.Expect(err).NotTo(HaveOccurred()) - - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "bucket-reconcile-", - Namespace: "default", - }, - Data: map[string][]byte{ - "accesskey": []byte("key"), - "secretkey": []byte("secret"), - }, - } - g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) - defer testEnv.Delete(ctx, secret) - - obj := &sourcev1.Bucket{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "bucket-reconcile-", - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - Provider: "generic", - BucketName: s3Server.BucketName, - Endpoint: u.Host, - Insecure: true, - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - SecretRef: &meta.LocalObjectReference{ - Name: secret.Name, - }, - }, - } - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for Bucket to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) || obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) 
- - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - uo, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(uo) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. - patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for Bucket to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) -} - -func TestBucketReconciler_reconcileStorage(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error - want sreconcile.Result - wantErr bool - assertArtifact *sourcev1.Artifact - assertConditions []metav1.Condition - assertPaths []string - }{ - { - name: "garbage collects", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { - revisions := []string{"a", "b", "c", "d"} - for n := range revisions { - v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), - Revision: v, - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, 
strings.NewReader(v), 0o640); err != nil { - return err - } - if n != len(revisions)-1 { - time.Sleep(time.Second * 1) - } - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/d.txt", - Revision: "d", - Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", - URL: testStorage.Hostname + "/reconcile-storage/d.txt", - Size: int64p(int64(len("d"))), - }, - assertPaths: []string{ - "/reconcile-storage/d.txt", - "/reconcile-storage/c.txt", - "!/reconcile-storage/b.txt", - "!/reconcile-storage/a.txt", - }, - want: sreconcile.ResultSuccess, - }, - { - name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), - Revision: "d", - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "!/reconcile-storage/invalid.txt", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), - }, - }, - { - name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: "http://outdated.com/reconcile-storage/hostname.txt", - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { - return err - } - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "/reconcile-storage/hostname.txt", - }, - assertArtifact: &sourcev1.Artifact{ - Path: 
"/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", - Size: int64p(int64(len("file"))), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - defer func() { - g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) - }() - - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.Bucket{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - }, - } - if tt.beforeFunc != nil { - g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) - } - - index := newEtagIndex() - - got, err := r.reconcileStorage(context.TODO(), obj, index, "") - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) - if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { - g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) - } - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - for _, p := range tt.assertPaths { - absoluteP := filepath.Join(testStorage.BasePath, p) - if !strings.HasPrefix(p, "!") { - g.Expect(absoluteP).To(BeAnExistingFile()) - continue - } - g.Expect(absoluteP).NotTo(BeAnExistingFile()) - } - }) - } -} - -func TestBucketReconciler_reconcileSource_generic(t *testing.T) { - tests := []struct { - name string - bucketName string - bucketObjects []*s3mock.Object - middleware http.Handler - secret *corev1.Secret - beforeFunc func(obj *sourcev1.Bucket) - want sreconcile.Result - wantErr bool - assertIndex *etagIndex - assertConditions []metav1.Condition - }{ - { - name: "Reconciles GCS source", - bucketName: "dummy", - bucketObjects: []*s3mock.Object{ - { - Key: "test.txt", - Content: []byte("test"), - ContentType: "text/plain", - 
LastModified: time.Now(), - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - }, - }, - // TODO(hidde): middleware for mock server - //{ - // name: "authenticates using secretRef", - // bucketName: "dummy", - //}, - { - name: "Observes non-existing secretRef", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dummy", - } - }, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), - }, - }, - { - name: "Observes invalid secretRef", - bucketName: "dummy", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - }, - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dummy", - } - }, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields 'accesskey' and 'secretkey'"), - }, - }, - { - name: "Observes non-existing bucket name", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.BucketName = "invalid" - }, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), - }, - }, - { - name: "Transient bucket name API failure", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.Endpoint = "transient.example.com" - obj.Spec.BucketName = "unavailable" - }, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), - }, - }, - { - name: ".sourceignore", - bucketName: "dummy", - bucketObjects: []*s3mock.Object{ - { - Key: ".sourceignore", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - { - Key: "ignored/file.txt", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - { - Key: "included/file.txt", - Content: []byte("included/file.txt"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), - }, - }, - { - name: "spec.ignore overrides .sourceignore", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - ignore := "!ignored/file.txt" - obj.Spec.Ignore = &ignore - }, - bucketObjects: []*s3mock.Object{ - { - Key: ".sourceignore", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - { - Key: "ignored/file.txt", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - 
LastModified: time.Now(), - }, - { - Key: "included/file.txt", - Content: []byte("included/file.txt"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", - "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), - }, - }, - { - name: "Up-to-date artifact", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", - } - }, - bucketObjects: []*s3mock.Object{ - { - Key: "test.txt", - Content: []byte("test"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{}, - }, - { - name: "Removes FetchFailedCondition after reconciling source", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") - }, - bucketObjects: []*s3mock.Object{ - { - Key: "test.txt", - Content: []byte("test"), - ContentType: "text/plain", - LastModified: time.Now(), - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) - if tt.secret != nil { - builder.WithObjects(tt.secret) - } - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Client: builder.Build(), - Storage: testStorage, - } - tmpDir := t.TempDir() - - obj := &sourcev1.Bucket{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.BucketKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", - }, - Spec: sourcev1.BucketSpec{ - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - var server *s3mock.Server - if tt.bucketName != "" { - server = s3mock.NewServer(tt.bucketName) - server.Objects = tt.bucketObjects - server.Start() - defer server.Stop() - - g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) - u, err := url.Parse(server.HTTPAddress()) - g.Expect(err).NotTo(HaveOccurred()) - - obj.Spec.BucketName = tt.bucketName - obj.Spec.Endpoint = u.Host - // TODO(hidde): also test TLS - obj.Spec.Insecure = true - } - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - index := newEtagIndex() - - got, err := r.reconcileSource(context.TODO(), obj, index, tmpDir) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { - tests := []struct { - name string - bucketName string - bucketObjects []*gcsmock.Object - secret *corev1.Secret - beforeFunc func(obj 
*sourcev1.Bucket) - want sreconcile.Result - wantErr bool - assertIndex *etagIndex - assertConditions []metav1.Condition - }{ - { - name: "Reconciles GCS source", - bucketName: "dummy", - bucketObjects: []*gcsmock.Object{ - { - Key: "test.txt", - ContentType: "text/plain", - Content: []byte("test"), - Generation: 3, - }, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "accesskey": []byte("key"), - "secretkey": []byte("secret"), - "serviceaccount": []byte("testsa"), - }, - }, - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dummy", - } - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - }, - }, - { - name: "Observes non-existing secretRef", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dummy", - } - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), - }, - }, - { - name: "Observes invalid secretRef", - bucketName: "dummy", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - }, - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dummy", - } - }, - want: sreconcile.ResultEmpty, - wantErr: true, 
- assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields"), - }, - }, - { - name: "Observes non-existing bucket name", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.BucketName = "invalid" - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), - }, - }, - { - name: "Transient bucket name API failure", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Spec.Endpoint = "transient.example.com" - obj.Spec.BucketName = "unavailable" - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertIndex: newEtagIndex(), - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), - }, - }, - { - name: ".sourceignore", - bucketName: "dummy", - bucketObjects: []*gcsmock.Object{ - { - Key: ".sourceignore", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - Generation: 1, - }, - { - Key: "ignored/file.txt", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - Generation: 4, - }, - { - Key: "included/file.txt", - Content: []byte("included/file.txt"), - ContentType: "text/plain", - Generation: 3, - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", 
"new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), - }, - }, - { - name: "spec.ignore overrides .sourceignore", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - ignore := "!ignored/file.txt" - obj.Spec.Ignore = &ignore - }, - bucketObjects: []*gcsmock.Object{ - { - Key: ".sourceignore", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - Generation: 1, - }, - { - Key: "ignored/file.txt", - Content: []byte("ignored/file.txt"), - ContentType: "text/plain", - Generation: 2, - }, - { - Key: "included/file.txt", - Content: []byte("included/file.txt"), - ContentType: "text/plain", - Generation: 4, - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", - "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), - }, - }, - { - name: "Up-to-date artifact", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", - } - }, - bucketObjects: []*gcsmock.Object{ - { - Key: "test.txt", - Content: []byte("test"), - ContentType: "text/plain", - Generation: 2, - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{}, - }, - { - name: "Removes FetchFailedCondition after reconciling source", - bucketName: "dummy", - beforeFunc: func(obj *sourcev1.Bucket) { - 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") - }, - bucketObjects: []*gcsmock.Object{ - { - Key: "test.txt", - Content: []byte("test"), - ContentType: "text/plain", - Generation: 2, - }, - }, - want: sreconcile.ResultSuccess, - assertIndex: &etagIndex{ - index: map[string]string{ - "test.txt": "098f6bcd4621d373cade4e832627b4f6", - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), - }, - }, - // TODO: Middleware for mock server to test authentication using secret. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) - if tt.secret != nil { - builder.WithObjects(tt.secret) - } - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Client: builder.Build(), - Storage: testStorage, - } - tmpDir := t.TempDir() - - // Test bucket object. - obj := &sourcev1.Bucket{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.BucketKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", - }, - Spec: sourcev1.BucketSpec{ - BucketName: tt.bucketName, - Timeout: &metav1.Duration{Duration: timeout}, - Provider: sourcev1.GoogleBucketProvider, - }, - } - - // Set up the mock GCP bucket server. - server := gcsmock.NewServer(tt.bucketName) - server.Objects = tt.bucketObjects - server.Start() - defer server.Stop() - - g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) - - obj.Spec.Endpoint = server.HTTPAddress() - obj.Spec.Insecure = true - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - // Set the GCP storage host to be used by the GCP client. 
- g.Expect(os.Setenv(EnvGcpStorageHost, obj.Spec.Endpoint)).ToNot(HaveOccurred()) - defer func() { - g.Expect(os.Unsetenv(EnvGcpStorageHost)).ToNot(HaveOccurred()) - }() - - index := newEtagIndex() - - got, err := r.reconcileSource(context.TODO(), obj, index, tmpDir) - t.Log(err) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestBucketReconciler_reconcileArtifact(t *testing.T) { - tests := []struct { - name string - beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) - afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "Archiving artifact to storage makes ArtifactInStorage=True", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), - }, - }, - { - name: "Up-to-date artifact should not persist and update status", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - revision, _ := index.Revision() - obj.Spec.Interval = metav1.Duration{Duration: interval} - // Incomplete artifact - obj.Status.Artifact = &sourcev1.Artifact{Revision: revision} - }, - afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { - // Still incomplete - t.Expect(obj.Status.URL).To(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), - }, - }, - { - name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), - }, - }, - { - name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { - localPath := testStorage.LocalPath(*obj.GetArtifact()) - symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") - targetFile, err := os.Readlink(symlinkPath) - t.Expect(err).NotTo(HaveOccurred()) - t.Expect(localPath).To(Equal(targetFile)) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), - }, - }, - { - name: "Dir path deleted", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path"), - }, - }, - { - name: "Dir path is not a directory", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) { - // 
Remove the given directory and create a file for the same - // path. - t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) - f, err := os.Create(dir) - defer f.Close() - t.Expect(err).ToNot(HaveOccurred()) - }, - afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { - t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "is not a directory"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - tmpDir := t.TempDir() - - obj := &sourcev1.Bucket{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.BucketKind, - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-bucket-", - Generation: 1, - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - index := newEtagIndex() - - if tt.beforeFunc != nil { - tt.beforeFunc(g, obj, index, tmpDir) - } - - got, err := r.reconcileArtifact(context.TODO(), obj, index, tmpDir) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - // On error, artifact is empty. Check artifacts only on successful - // reconcile. 
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - if tt.afterFunc != nil { - tt.afterFunc(g, obj, tmpDir) - } - }) - } -} - -func Test_etagIndex_Revision(t *testing.T) { - tests := []struct { - name string - list map[string]string - want string - wantErr bool - }{ - { - name: "index with items", - list: map[string]string{ - "one": "one", - "two": "two", - "three": "three", - }, - want: "c0837b3f32bb67c5275858fdb96595f87801cf3c2f622c049918a051d29b2c7f", - }, - { - name: "index with items in different order", - list: map[string]string{ - "three": "three", - "one": "one", - "two": "two", - }, - want: "c0837b3f32bb67c5275858fdb96595f87801cf3c2f622c049918a051d29b2c7f", - }, - { - name: "empty index", - list: map[string]string{}, - want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - }, - { - name: "nil index", - list: nil, - want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - index := &etagIndex{index: tt.list} - got, err := index.Revision() - if (err != nil) != tt.wantErr { - t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("revision() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestBucketReconciler_statusConditions(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.Bucket) - assertConditions []metav1.Condition - }{ - { - name: "positive conditions only", - beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - { - name: 
"multiple failures", - beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), - }, - }, - { - name: "mixed positive and negative conditions", - beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.Bucket{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.BucketKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "bucket", - Namespace: "foo", - }, - } - clientBuilder 
:= fake.NewClientBuilder() - clientBuilder.WithObjects(obj) - c := clientBuilder.Build() - - patchHelper, err := patch.NewHelper(obj, c) - g.Expect(err).ToNot(HaveOccurred()) - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - recResult := sreconcile.ResultSuccess - var retErr error - - summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(bucketReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner("source-controller"), - } - _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - - key := client.ObjectKeyFromObject(obj) - g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestBucketReconciler_notify(t *testing.T) { - tests := []struct { - name string - res sreconcile.Result - resErr error - oldObjBeforeFunc func(obj *sourcev1.Bucket) - newObjBeforeFunc func(obj *sourcev1.Bucket) - wantEvent string - }{ - { - name: "error - no event", - res: sreconcile.ResultEmpty, - resErr: errors.New("some error"), - }, - { - name: "new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - }, - wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", - }, - { - name: "recovery from failure", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - 
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal Succeeded stored artifact with 2 fetched files from", - }, - { - name: "recovery and new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", - }, - { - name: "no updates", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - recorder := record.NewFakeRecorder(32) - - oldObj := &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - BucketName: "test-bucket", - }, - } - newObj := oldObj.DeepCopy() - - if tt.oldObjBeforeFunc != nil { - tt.oldObjBeforeFunc(oldObj) - } - if tt.newObjBeforeFunc != nil { - tt.newObjBeforeFunc(newObj) - } - - reconciler := &BucketReconciler{ - 
EventRecorder: recorder, - } - index := &etagIndex{ - index: map[string]string{ - "zzz": "qqq", - "bbb": "ddd", - }, - } - reconciler.notify(ctx, oldObj, newObj, index, tt.res, tt.resErr) - - select { - case x, ok := <-recorder.Events: - g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") - if tt.wantEvent != "" { - g.Expect(x).To(ContainSubstring(tt.wantEvent)) - } - default: - if tt.wantEvent != "" { - t.Errorf("expected some event to be emitted") - } - } - }) - } -} diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go deleted file mode 100644 index 64f651efa..000000000 --- a/controllers/gitrepository_controller.go +++ /dev/null @@ -1,977 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/fluxcd/pkg/runtime/logger" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - kuberecorder "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" - helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/runtime/predicates" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" - "github.com/fluxcd/source-controller/internal/features" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/internal/util" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/strategy" - "github.com/fluxcd/source-controller/pkg/sourceignore" -) - -// gitRepositoryReadyCondition contains the information required to summarize a -// v1beta2.GitRepository Ready Condition. 
-var gitRepositoryReadyCondition = summarize.Conditions{ - Target: meta.ReadyCondition, - Owned: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactInStorageCondition, - sourcev1.SourceVerifiedCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - Summarize: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactInStorageCondition, - sourcev1.SourceVerifiedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, - }, - NegativePolarity: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, - }, -} - -// gitRepositoryFailConditions contains the conditions that represent a failure. -var gitRepositoryFailConditions = []string{ - sourcev1.FetchFailedCondition, - sourcev1.IncludeUnavailableCondition, - sourcev1.StorageOperationFailedCondition, -} - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// GitRepositoryReconciler reconciles a v1beta2.GitRepository object. -type GitRepositoryReconciler struct { - client.Client - kuberecorder.EventRecorder - helper.Metrics - - Storage *Storage - ControllerName string - // Libgit2TransportInitialized lets the reconciler know whether - // libgit2 transport was intialized successfully. 
- Libgit2TransportInitialized func() bool - - requeueDependency time.Duration - features map[string]bool -} - -type GitRepositoryReconcilerOptions struct { - MaxConcurrentReconciles int - DependencyRequeueInterval time.Duration - RateLimiter ratelimiter.RateLimiter -} - -// gitRepositoryReconcileFunc is the function type for all the -// v1beta2.GitRepository (sub)reconcile functions. -type gitRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) - -func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) -} - -func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error { - r.requeueDependency = opts.DependencyRequeueInterval - - if r.features == nil { - r.features = map[string]bool{} - } - - // Check and enable gated features. - if oc, _ := features.Enabled(features.OptimizedGitClones); oc { - r.features[features.OptimizedGitClones] = true - } - - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.GitRepository{}, builder.WithPredicates( - predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, - }). - Complete(r) -} - -func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { - start := time.Now() - log := ctrl.LoggerFrom(ctx). - // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. 
- ctx = ctrl.LoggerInto(ctx, log) - - // Fetch the GitRepository - obj := &sourcev1.GitRepository{} - if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // Initialize the patch helper with the current version of the object. - patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err - } - - // recResult stores the abstracted reconcile result. - var recResult sreconcile.Result - - // Always attempt to patch the object and status after each reconciliation - // NOTE: The final runtime result and error are set in this block. - defer func() { - summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(gitRepositoryReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithProcessors( - summarize.ErrorActionHandler, - summarize.RecordReconcileReq, - ), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner(r.ControllerName), - } - result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
- - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) - r.Metrics.RecordDuration(ctx, obj, start) - }() - - // Add finalizer first if not exist to avoid the race condition - // between init and delete - if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) - recResult = sreconcile.ResultRequeue - return - } - - // Examine if the object is under deletion - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - recResult, retErr = r.reconcileDelete(ctx, obj) - return - } - - // Reconcile actual object - reconcilers := []gitRepositoryReconcileFunc{ - r.reconcileStorage, - r.reconcileSource, - r.reconcileInclude, - r.reconcileArtifact, - } - recResult, retErr = r.reconcile(ctx, obj, reconcilers) - return -} - -// reconcile iterates through the gitRepositoryReconcileFunc tasks for the -// object. It returns early on the first call that returns -// reconcile.ResultRequeue, or produces an error. 
-func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepositoryReconcileFunc) (sreconcile.Result, error) { - oldObj := obj.DeepCopy() - - // Mark as reconciling if generation differs - if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) - } - - // Create temp dir for Git clone - tmpDir, err := util.TempDirForObj("", obj) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to create temporary working directory: %w", err), - sourcev1.DirCreationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - defer func() { - if err = os.RemoveAll(tmpDir); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory") - } - }() - conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) - - // Run the sub-reconcilers and build the result of reconciliation. - var ( - commit git.Commit - includes artifactSet - - res sreconcile.Result - resErr error - ) - for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &commit, &includes, tmpDir) - // Exit immediately on ResultRequeue. - if recResult == sreconcile.ResultRequeue { - return sreconcile.ResultRequeue, nil - } - // If an error is received, prioritize the returned results because an - // error also means immediate requeue. - if err != nil { - resErr = err - res = recResult - break - } - // Prioritize requeue request in the result. - res = sreconcile.LowestRequeuingResult(res, recResult) - } - - r.notify(ctx, oldObj, obj, commit, res, resErr) - - return res, resErr -} - -// notify emits notification related to the result of reconciliation. 
-func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) { - // Notify successful reconciliation for new artifact, no-op reconciliation - // and recovery from any failure. - if r.shouldNotify(oldObj, newObj, res, resErr) { - annotations := map[string]string{ - sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision, - sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum, - } - - var oldChecksum string - if oldObj.GetArtifact() != nil { - oldChecksum = oldObj.GetArtifact().Checksum - } - - // A partial commit due to no-op clone doesn't contain the commit - // message information. Have separate message for it. - var message string - if git.IsConcreteCommit(commit) { - message = fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage()) - } else { - message = fmt.Sprintf("stored artifact for commit '%s'", commit.String()) - } - - // Notify on new artifact and failure recovery. - if oldChecksum != newObj.GetArtifact().Checksum { - r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, - "NewArtifact", message) - ctrl.LoggerFrom(ctx).Info(message) - } else { - if sreconcile.FailureRecovery(oldObj, newObj, gitRepositoryFailConditions) { - r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, - meta.SucceededReason, message) - ctrl.LoggerFrom(ctx).Info(message) - } - } - } -} - -// shouldNotify analyzes the result of subreconcilers and determines if a -// notification should be sent. It decides about the final informational -// notifications after the reconciliation. Failure notification and in-line -// notifications are not handled here. -func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepository, res sreconcile.Result, resErr error) bool { - // Notify for successful reconciliation. 
- if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { - return true - } - // Notify for no-op reconciliation with ignore error. - if resErr != nil && res == sreconcile.ResultEmpty && newObj.Status.Artifact != nil { - // Convert to Generic error and check for ignore. - if ge, ok := resErr.(*serror.Generic); ok { - return ge.Ignore == true - } - } - return false -} - -// reconcileStorage ensures the current state of the storage matches the -// desired and previously observed state. -// -// The garbage collection is executed based on the flag configured settings and -// may remove files that are beyond their TTL or the maximum number of files -// to survive a collection cycle. -// If the Artifact in the Status of the object disappeared from the Storage, -// it is removed from the object. -// If the object does not have an Artifact in its Status, a Reconciling -// condition is added. -// The hostname of any URL in the Status of the object are updated, to ensure -// they match the Storage server hostname of current runtime. -func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, - obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) { - // Garbage collect previous advertised artifact(s) from storage - _ = r.garbageCollect(ctx, obj) - - // Determine if the advertised artifact is still in storage - if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { - obj.Status.Artifact = nil - obj.Status.URL = "" - // Remove the condition as the artifact doesn't exist. 
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) - } - - // Record that we do not have an artifact - if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") - conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) - return sreconcile.ResultSuccess, nil - } - - // Always update URLs to ensure hostname is up-to-date - // TODO(hidde): we may want to send out an event only if we notice the URL has changed - r.Storage.SetArtifactURL(obj.GetArtifact()) - obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - - return sreconcile.ResultSuccess, nil -} - -// reconcileSource ensures the upstream Git repository and reference can be -// cloned and checked out using the specified configuration, and observes its -// state. It also checks if the included repositories are available for use. -// -// The included repositories are fetched and their metadata are stored. In case -// one of the included repositories isn't ready, it records -// v1beta2.IncludeUnavailableCondition=True and returns early. When all the -// included repositories are ready, it removes -// v1beta2.IncludeUnavailableCondition from the object. -// When the included artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. -// The repository is cloned to the given dir, using the specified configuration -// to check out the reference. In case of an error during this process -// (including transient errors), it records v1beta2.FetchFailedCondition=True -// and returns early. -// On a successful checkout, it removes v1beta2.FetchFailedCondition and -// compares the current revision of HEAD to the revision of the Artifact in the -// Status of the object. It records v1beta2.ArtifactOutdatedCondition=True when -// they differ. -// If specified, the signature of the Git commit is verified. 
If the signature -// can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns early. When successful, -// it records v1beta2.SourceVerifiedCondition=True. -// When all the above is successful, the given Commit pointer is set to the -// commit of the checked out Git repository. -// -// If the optimized git clone feature is enabled, it checks if the remote repo -// and the local artifact are on the same revision, and no other source content -// related configurations have changed since last reconciliation. If there's a -// change, it short-circuits the whole reconciliation with an early return. -func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, - obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { - // Exit early, if we need to use libgit2 AND managed transport hasn't been intialized. - if !r.Libgit2TransportInitialized() && obj.Spec.GitImplementation == sourcev1.LibGit2Implementation { - return sreconcile.ResultEmpty, serror.NewStalling( - errors.New("libgit2 managed transport not initialized"), "Libgit2TransportNotEnabled", - ) - } - // Configure authentication strategy to access the source - var authOpts *git.AuthOptions - var err error - if obj.Spec.SecretRef != nil { - // Attempt to retrieve secret - name := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.Spec.SecretRef.Name, - } - var secret corev1.Secret - if err := r.Client.Get(ctx, name, &secret); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to get secret '%s': %w", name.String(), err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Return error as the world as observed may change - return sreconcile.ResultEmpty, e - } - - // Configure strategy with secret - authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret) - } else { - // Set the minimal auth 
options for valid transport. - authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL) - } - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Return error as the contents of the secret may change - return sreconcile.ResultEmpty, e - } - - // Fetch the included artifact metadata. - artifacts, err := r.fetchIncludes(ctx, obj) - if err != nil { - return sreconcile.ResultEmpty, err - } - - // Observe if the artifacts still match the previous included ones - if artifacts.Diff(obj.Status.IncludedArtifacts) { - message := fmt.Sprintf("included artifacts differ from last observed includes") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message) - conditions.MarkReconciling(obj, "IncludeChange", message) - } - - // Persist the ArtifactSet. - *includes = *artifacts - - var optimizedClone bool - if val, ok := r.features[features.OptimizedGitClones]; ok && val { - optimizedClone = true - } - - c, err := r.gitCheckout(ctx, obj, authOpts, dir, optimizedClone) - if err != nil { - return sreconcile.ResultEmpty, err - } - // Assign the commit to the shared commit reference. - *commit = *c - - // If it's a partial commit obtained from an existing artifact, check if the - // reconciliation can be skipped if other configurations have not changed. - if !git.IsConcreteCommit(*commit) { - // Calculate content configuration checksum. - if r.calculateContentConfigChecksum(obj, includes) == obj.Status.ContentConfigChecksum { - ge := serror.NewGeneric( - fmt.Errorf("no changes since last reconcilation: observed revision '%s'", - commit.String()), sourcev1.GitOperationSucceedReason, - ) - ge.Notification = false - ge.Ignore = true - ge.Event = corev1.EventTypeNormal - // Remove any stale fetch failed condition. 
- conditions.Delete(obj, sourcev1.FetchFailedCondition) - // IMPORTANT: This must be set to ensure that the observed - // generation of this condition is updated. In case of full - // reconciliation reconcileArtifact() ensures that it's set at the - // very end. - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, - "stored artifact for revision '%s'", commit.String()) - // TODO: Find out if such condition setting is needed when commit - // signature verification is enabled. - return sreconcile.ResultEmpty, ge - } - - // If we can't skip the reconciliation, checkout again without any - // optimization. - c, err := r.gitCheckout(ctx, obj, authOpts, dir, false) - if err != nil { - return sreconcile.ResultEmpty, err - } - *commit = *c - } - ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String()) - conditions.Delete(obj, sourcev1.FetchFailedCondition) - - // Verify commit signature - if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { - return result, err - } - - // Mark observations about the revision on the object - if !obj.GetArtifact().HasRevision(commit.String()) { - message := fmt.Sprintf("new upstream revision '%s'", commit.String()) - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) - conditions.MarkReconciling(obj, "NewRevision", message) - } - return sreconcile.ResultSuccess, nil -} - -// reconcileArtifact archives a new Artifact to the Storage, if the current -// (Status) data on the object does not match the given. -// -// The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. -// If the given Artifact and/or artifactSet (includes) and the content config -// checksum do not differ from the object's current, it returns early. 
-// Source ignore patterns are loaded, and the given directory is archived while -// taking these patterns into account. -// On a successful archive, the Artifact, Includes and new content config -// checksum in the Status of the object are set, and the symlink in the Storage -// is updated to its path. -func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, - obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { - - // Create potential new artifact with current available metadata - artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) - - // Calculate the content config checksum. - ccc := r.calculateContentConfigChecksum(obj, includes) - - // Set the ArtifactInStorageCondition if there's no drift. - defer func() { - if obj.GetArtifact().HasRevision(artifact.Revision) && - !includes.Diff(obj.Status.IncludedArtifacts) && - obj.Status.ContentConfigChecksum == ccc { - conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, - "stored artifact for revision '%s'", artifact.Revision) - } - }() - - // The artifact is up-to-date - if obj.GetArtifact().HasRevision(artifact.Revision) && - !includes.Diff(obj.Status.IncludedArtifacts) && - obj.Status.ContentConfigChecksum == ccc { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) - return sreconcile.ResultSuccess, nil - } - - // Ensure target path exists and is a directory - if f, err := os.Stat(dir); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to stat target artifact path: %w", err), - sourcev1.StatOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } else if !f.IsDir() 
{ - e := serror.NewGeneric( - fmt.Errorf("invalid target path: '%s' is not a directory", dir), - sourcev1.InvalidPathReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to create artifact directory: %w", err), - sourcev1.DirCreationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - unlock, err := r.Storage.Lock(artifact) - if err != nil { - return sreconcile.ResultEmpty, serror.NewGeneric( - fmt.Errorf("failed to acquire lock for artifact: %w", err), - meta.FailedReason, - ) - } - defer unlock() - - // Load ignore rules for archiving - ignoreDomain := strings.Split(dir, string(filepath.Separator)) - ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain) - if err != nil { - return sreconcile.ResultEmpty, serror.NewGeneric( - fmt.Errorf("failed to load source ignore patterns from repository: %w", err), - "SourceIgnoreError", - ) - } - if obj.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...) 
- } - - // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { - e := serror.NewGeneric( - fmt.Errorf("unable to archive artifact to storage: %w", err), - sourcev1.ArchiveOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Record it on the object - obj.Status.Artifact = artifact.DeepCopy() - obj.Status.IncludedArtifacts = *includes - obj.Status.ContentConfigChecksum = ccc - - // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, - "failed to update status URL symlink: %s", err) - } - if url != "" { - obj.Status.URL = url - } - conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) - return sreconcile.ResultSuccess, nil -} - -// reconcileInclude reconciles the on the object specified -// v1beta2.GitRepositoryInclude list by copying their Artifact (sub)contents to -// the specified paths in the given directory. -// -// When one of the includes is unavailable, it marks the object with -// v1beta2.IncludeUnavailableCondition=True and returns early. -// When the copy operations are successful, it removes the -// v1beta2.IncludeUnavailableCondition from the object. -// When the composed artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. 
-func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, - obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { - - for i, incl := range obj.Spec.Include { - // Do this first as it is much cheaper than copy operations - toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err), - "IllegalPath", - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Get artifact at the same include index. The artifactSet is created - // such that the index of artifactSet matches with the index of Include. - // Hence, index is used here to pick the associated artifact from - // includes. - var artifact *sourcev1.Artifact - for j, art := range *includes { - if i == j { - artifact = art - } - } - - // Copy artifact (sub)contents to configured directory. - if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), - Reason: "CopyFailure", - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - } - conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) - return sreconcile.ResultSuccess, nil -} - -// gitCheckout builds checkout options with the given configurations and -// performs a git checkout. -func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, - obj *sourcev1.GitRepository, authOpts *git.AuthOptions, dir string, optimized bool) (*git.Commit, error) { - // Configure checkout strategy. 
- checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules} - if ref := obj.Spec.Reference; ref != nil { - checkoutOpts.Branch = ref.Branch - checkoutOpts.Commit = ref.Commit - checkoutOpts.Tag = ref.Tag - checkoutOpts.SemVer = ref.SemVer - } - - // Only if the object has an existing artifact in storage, attempt to - // short-circuit clone operation. reconcileStorage has already verified - // that the artifact exists. - if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) { - if artifact := obj.GetArtifact(); artifact != nil { - checkoutOpts.LastRevision = artifact.Revision - } - } - - gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) - defer cancel() - - checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(gitCtx, - git.Implementation(obj.Spec.GitImplementation), checkoutOpts) - if err != nil { - // Do not return err as recovery without changes is impossible. - e := &serror.Stalling{ - Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), - Reason: sourcev1.GitOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return nil, e - } - - // this is needed only for libgit2, due to managed transport. - if obj.Spec.GitImplementation == sourcev1.LibGit2Implementation { - // We set the TransportOptionsURL of this set of authentication options here by constructing - // a unique URL that won't clash in a multi tenant environment. This unique URL is used by - // libgit2 managed transports. This enables us to bypass the inbuilt credentials callback in - // libgit2, which is inflexible and unstable. 
- if strings.HasPrefix(obj.Spec.URL, "http") { - authOpts.TransportOptionsURL = fmt.Sprintf("http://%s/%s/%d", obj.Name, obj.UID, obj.Generation) - } else if strings.HasPrefix(obj.Spec.URL, "ssh") { - authOpts.TransportOptionsURL = fmt.Sprintf("ssh://%s/%s/%d", obj.Name, obj.UID, obj.Generation) - } else { - e := &serror.Stalling{ - Err: fmt.Errorf("git repository URL '%s' has invalid transport type, supported types are: http, https, ssh", obj.Spec.URL), - Reason: sourcev1.URLInvalidReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return nil, e - } - } - - commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to checkout and determine revision: %w", err), - sourcev1.GitOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return nil, e - } - return commit, nil -} - -// fetchIncludes fetches artifact metadata of all the included repos. -func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) { - artifacts := make(artifactSet, len(obj.Spec.Include)) - for i, incl := range obj.Spec.Include { - // Retrieve the included GitRepository. 
- dep := &sourcev1.GitRepository{} - if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { - e := serror.NewWaiting( - fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err), - "NotFound", - ) - e.RequeueAfter = r.requeueDependency - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error()) - return nil, e - } - - // Confirm include has an artifact - if dep.GetArtifact() == nil { - e := serror.NewWaiting( - fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name), - "NoArtifact", - ) - e.RequeueAfter = r.requeueDependency - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error()) - return nil, e - } - - artifacts[i] = dep.GetArtifact().DeepCopy() - } - - // We now know all the includes are available. - conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) - - return &artifacts, nil -} - -// calculateContentConfigChecksum calculates a checksum of all the -// configurations that result in a change in the source artifact. It can be used -// to decide if further reconciliation is needed when an artifact already exists -// for a set of configurations. -func (r *GitRepositoryReconciler) calculateContentConfigChecksum(obj *sourcev1.GitRepository, includes *artifactSet) string { - c := []byte{} - // Consider the ignore rules and recurse submodules. - if obj.Spec.Ignore != nil { - c = append(c, []byte(*obj.Spec.Ignore)...) - } - c = append(c, []byte(strconv.FormatBool(obj.Spec.RecurseSubmodules))...) - - // Consider the included repository attributes. - for _, incl := range obj.Spec.Include { - c = append(c, []byte(incl.GitRepositoryRef.Name+incl.FromPath+incl.ToPath)...) - } - - // Consider the checksum and revision of all the included remote artifact. - // This ensures that if the included repos get updated, this checksum changes. 
- // NOTE: The content of an artifact may change at the same revision if the - // ignore rules change. Hence, consider both checksum and revision to - // capture changes in artifact checksum as well. - // TODO: Fix artifactSet.Diff() to consider checksum as well. - if includes != nil { - for _, incl := range *includes { - c = append(c, []byte(incl.Checksum)...) - c = append(c, []byte(incl.Revision)...) - } - } - - return fmt.Sprintf("sha256:%x", sha256.Sum256(c)) -} - -// verifyCommitSignature verifies the signature of the given Git commit, if a -// verification mode is specified on the object. -// If the signature can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns. -// When successful, it records v1beta2.SourceVerifiedCondition=True. -// If no verification mode is specified on the object, the -// v1beta2.SourceVerifiedCondition Condition is removed. -func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { - // Check if there is a commit verification is configured and remove any old - // observations if there is none - if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { - conditions.Delete(obj, sourcev1.SourceVerifiedCondition) - return sreconcile.ResultSuccess, nil - } - - // Get secret with GPG data - publicKeySecret := types.NamespacedName{ - Namespace: obj.Namespace, - Name: obj.Spec.Verification.SecretRef.Name, - } - secret := &corev1.Secret{} - if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { - e := serror.NewGeneric( - fmt.Errorf("PGP public keys secret error: %w", err), - "VerificationError", - ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - var keyRings []string - for _, v := range secret.Data { - keyRings = append(keyRings, string(v)) - } - // Verify commit with GPG data from 
secret - if _, err := commit.Verify(keyRings...); err != nil { - e := serror.NewGeneric( - fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), - "InvalidCommitSignature", - ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) - // Return error in the hope the secret changes - return sreconcile.ResultEmpty, e - } - - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, - "verified signature of commit '%s'", commit.Hash.String()) - r.eventLogf(ctx, obj, events.EventTypeTrace, "VerifiedCommit", - "verified signature of commit '%s'", commit.Hash.String()) - return sreconcile.ResultSuccess, nil -} - -// reconcileDelete handles the deletion of the object. -// It first garbage collects all Artifacts for the object from the Storage. -// Removing the finalizer from the object if successful. -func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) { - // Garbage collect the resource's artifacts - if err := r.garbageCollect(ctx, obj); err != nil { - // Return the error so we retry the failed garbage collection - return sreconcile.ResultEmpty, err - } - - // Remove our finalizer from the list - controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) - - // Stop reconciliation as the object is being deleted - return sreconcile.ResultEmpty, nil -} - -// garbageCollect performs a garbage collection for the given object. -// -// It removes all but the current Artifact from the Storage, unless the -// deletion timestamp on the object is set. Which will result in the -// removal of all Artifacts for the objects. 
-func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { - if !obj.DeletionTimestamp.IsZero() { - if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - return serror.NewGeneric( - fmt.Errorf("garbage collection for deleted resource failed: %w", err), - "GarbageCollectionFailed", - ) - } else if deleted != "" { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") - } - obj.Status.Artifact = nil - return nil - } - if obj.GetArtifact() != nil { - delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) - if err != nil { - return serror.NewGeneric( - fmt.Errorf("garbage collection of artifacts failed: %w", err), - "GarbageCollectionFailed", - ) - } - if len(delFiles) > 0 { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) - return nil - } - } - return nil -} - -// eventLogf records events, and logs at the same time. -// -// This log is different from the debug log in the EventRecorder, in the sense -// that this is a simple log. While the debug log contains complete details -// about the event. -func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { - msg := fmt.Sprintf(messageFmt, args...) - // Log and emit event. 
- if eventType == corev1.EventTypeWarning { - ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) - } else { - ctrl.LoggerFrom(ctx).Info(msg) - } - r.Eventf(obj, eventType, reason, msg) -} diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go deleted file mode 100644 index bfb857df0..000000000 --- a/controllers/gitrepository_controller_test.go +++ /dev/null @@ -1,2214 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/darkowlzz/controller-check/status" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/ssh" - "github.com/fluxcd/pkg/testserver" - "github.com/go-git/go-billy/v5/memfs" - gogit "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/memory" - . 
"github.com/onsi/gomega" - sshtestdata "golang.org/x/crypto/ssh/testdata" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" - "github.com/fluxcd/source-controller/internal/features" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" -) - -const ( - encodedCommitFixture = `tree f0c522d8cc4c90b73e2bc719305a896e7e3c108a -parent eb167bc68d0a11530923b1f24b4978535d10b879 -author Stefan Prodan 1633681364 +0300 -committer Stefan Prodan 1633681364 +0300 - -Update containerd and runc to fix CVEs - -Signed-off-by: Stefan Prodan -` - malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 -author Stefan Prodan 1633681364 +0300 -committer Stefan Prodan 1633681364 +0300 - -Update containerd and runc to fix CVEs - -Signed-off-by: Stefan Prodan -` - signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- - -iHUEABEIAB0WIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCYV//1AAKCRAyma6w5Ahb -r7nJAQCQU4zEJu04/Q0ac/UaL6htjhq/wTDNMeUM+aWG/LcBogEAqFUea1oR2BJQ -JCJmEtERFh39zNWSazQmxPAFhEE0kbc= -=+Wlj ------END PGP SIGNATURE-----` - armoredKeyRingFixture = `-----BEGIN PGP 
PUBLIC KEY BLOCK----- - -mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8 -mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths -TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ -rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K -Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT -C05OtQOiDXjSpkLim81vNVPtI2XEug+9fEA+jeJakyGwwB+K8xqV3QILKCoWHKGx -yWcMHSR6cP9tdXCk2JHZBm1PLSJ8hIgMH/YwBJLYg90u8lLAs9WtpVBKkLplzzgm -B4Z4VxCC+xI1kt+3ZgYvYC+oUXJXrjyAzy+J1f+aWl2+S/79glWgl/xz2VibWMz6 -nZUE+wLMxOQqyOsBALsoE6z81y/7gfn4R/BziBASi1jq/r/wdboFYowmqd39DACX -+i+V0OplP2TN/F5JajzRgkrlq5cwZHinnw+IFwj9RTfOkdGb3YwhBt/h2PP38969 -ZG+y8muNtaIqih1pXj1fz9HRtsiCABN0j+JYpvV2D2xuLL7P1O0dt5BpJ3KqNCRw -mGgO2GLxbwvlulsLidCPxdK/M8g9Eeb/xwA5LVwvjVchHkzHuUT7durn7AT0RWiK -BT8iDfeBB9RKienAbWyybEqRaR6/Tv+mghFIalsDiBPbfm4rsNzsq3ohfByqECiy -yUvs2O3NDwkoaBDkA3GFyKv8/SVpcuL5OkVxAHNCIMhNzSgotQ3KLcQc0IREfFCa -3CsBAC7CsE2bJZ9IA9sbBa3jimVhWUQVudRWiLFeYHUF/hjhqS8IHyFwprjEOLaV -EG0kBO6ELypD/bOsmN9XZLPYyI3y9DM6Vo0KMomE+yK/By/ZMxVfex8/TZreUdhP -VdCLL95Rc4w9io8qFb2qGtYBij2wm0RWLcM0IhXWAtjI3B17IN+6hmv+JpiZccsM -AMNR5/RVdXIl0hzr8LROD0Xe4sTyZ+fm3mvpczoDPQNRrWpmI/9OT58itnVmZ5jM -7djV5y/NjBk63mlqYYfkfWto97wkhg0MnTnOhzdtzSiZQRzj+vf+ilLfIlLnuRr1 -JRV9Skv6xQltcFArx4JyfZCo7JB1ZXcbdFAvIXXS11RTErO0XVrXNm2RenpW/yZA -9f+ESQ/uUB6XNuyqVUnJDAFJFLdzx8sO3DXo7dhIlgpFqgQobUl+APpbU5LT95sm -89UrV0Lt9vh7k6zQtKOjEUhm+dErmuBnJo8MvchAuXLagHjvb58vYBCUxVxzt1KG -2IePwJ/oXIfawNEGad9Lmdo1FYG1u53AKWZmpYOTouu92O50FG2+7dBh0V2vO253 -aIGFRT1r14B1pkCIun7z7B/JELqOkmwmlRrUnxlADZEcQT3z/S8/4+2P7P6kXO7X -/TAX5xBhSqUbKe3DhJSOvf05/RVL5ULc2U2JFGLAtmBOFmnD/u0qoo5UvWliI+v/ -47QnU3RlZmFuIFByb2RhbiA8c3RlZmFuLnByb2RhbkBnbWFpbC5jb20+iJAEExEI -ADgWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34eAwIbAwULCQgHAgYVCgkICwIE -FgIDAQIeAQIXgAAKCRAyma6w5Ahbrzu/AP9l2YpRaWZr6wSQuEn0gMN8DRzsWJPx -pn0akdY7SRP3ngD9GoKgu41FAItnHAJ2KiHv/fHFyHMndNP3kPGPNW4BF+65Aw0E 
-X34eAxAMAMdYFCHmVA8TZxSTMBDpKYave8RiDCMMMjk26Gl0EPN9f2Y+s5++DhiQ -hojNH9VmJkFwZX1xppxe1y1aLa/U6fBAqMP/IdNH8270iv+A9YIxdsWLmpm99BDO -3suRfsHcOe9T0x/CwRfDNdGM/enGMhYGTgF4VD58DRDE6WntaBhl4JJa300NG6X0 -GM4Gh59DKWDnez/Shulj8demlWmakP5imCVoY+omOEc2k3nH02U+foqaGG5WxZZ+ -GwEPswm2sBxvn8nwjy9gbQwEtzNI7lWYiz36wCj2VS56Udqt+0eNg8WzocUT0XyI -moe1qm8YJQ6fxIzaC431DYi/mCDzgx4EV9ww33SXX3Yp2NL6PsdWJWw2QnoqSMpM -z5otw2KlMgUHkkXEKs0apmK4Hu2b6KD7/ydoQRFUqR38Gb0IZL1tOL6PnbCRUcig -Aypy016W/WMCjBfQ8qxIGTaj5agX2t28hbiURbxZkCkz+Z3OWkO0Rq3Y2hNAYM5s -eTn94JIGGwADBgv/dbSZ9LrBvdMwg8pAtdlLtQdjPiT1i9w5NZuQd7OuKhOxYTEB -NRDTgy4/DgeNThCeOkMB/UQQPtJ3Et45S2YRtnnuvfxgnlz7xlUn765/grtnRk4t -ONjMmb6tZos1FjIJecB/6h4RsvUd2egvtlpD/Z3YKr6MpNjWg4ji7m27e9pcJfP6 -YpTDrq9GamiHy9FS2F2pZlQxriPpVhjCLVn9tFGBIsXNxxn7SP4so6rJBmyHEAlq -iym9wl933e0FIgAw5C1vvprYu2amk+jmVBsJjjCmInW5q/kWAFnFaHBvk+v+/7tX -hywWUI7BqseikgUlkgJ6eU7E9z1DEyuS08x/cViDoNh2ntVUhpnluDu48pdqBvvY -a4uL/D+KI84THUAJ/vZy+q6G3BEb4hI9pFjgrdJpUKubxyZolmkCFZHjV34uOcTc -LQr28P8xW8vQbg5DpIsivxYLqDGXt3OyiItxvLMtw/ypt6PkoeP9A4KDST4StITE -1hrOrPtJ/VRmS2o0iHgEGBEIACAWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34e -AwIbDAAKCRAyma6w5Ahbr6QWAP9/pl2R6r1nuCnXzewSbnH1OLsXf32hFQAjaQ5o -Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE= -=/4e+ ------END PGP PUBLIC KEY BLOCK----- -` - emptyContentConfigChecksum = "sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa" -) - -var ( - testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation} -) - -func mockTransportNotInitialized() bool { - return false -} - -func TestGitRepositoryReconciler_Reconcile(t *testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(server.Root()) - server.AutoCreate() - g.Expect(server.StartHTTP()).To(Succeed()) - defer server.StopHTTP() - - repoPath := "/test.git" - _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) - 
g.Expect(err).NotTo(HaveOccurred()) - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "gitrepository-reconcile-", - Namespace: "default", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - URL: server.HTTPAddress() + repoPath, - }, - } - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for GitRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) || obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: gitRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. 
- patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for GitRepository to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) -} - -func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { - type options struct { - username string - password string - publicKey []byte - privateKey []byte - ca []byte - } - - tests := []struct { - name string - skipForImplementation string - protocol string - server options - secret *corev1.Secret - beforeFunc func(obj *sourcev1.GitRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "HTTP without secretRef makes ArtifactOutdated=True", - protocol: "http", - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), - }, - }, - { - name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", - protocol: "http", - server: options{ - username: "git", - password: "1234", - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", - }, - Data: map[string][]byte{ - "username": []byte("git"), - "password": []byte("1234"), - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = 
&meta.LocalObjectReference{Name: "basic-auth"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), - }, - }, - { - name: "HTTPS with CAFile secret makes ArtifactOutdated=True", - protocol: "https", - server: options{ - publicKey: tlsPublicKey, - privateKey: tlsPrivateKey, - ca: tlsCA, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", - }, - Data: map[string][]byte{ - "caFile": tlsCA, - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), - }, - }, - { - name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", - skipForImplementation: sourcev1.LibGit2Implementation, - protocol: "https", - server: options{ - publicKey: tlsPublicKey, - privateKey: tlsPrivateKey, - ca: tlsCA, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-ca", - }, - Data: map[string][]byte{ - "caFile": []byte("invalid"), - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - // The expected error messages may differ when in darwin. In some cases it will match the - // error message expected in linux: "x509: certificate signed by unknown authority". In - // other cases it may get "x509: “example.com” certificate is not standards compliant" instead. 
- // - // Trimming the expected error message for consistent results. - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: "), - }, - }, - { - name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", - skipForImplementation: sourcev1.GoGitImplementation, - protocol: "https", - server: options{ - publicKey: tlsPublicKey, - privateKey: tlsPrivateKey, - ca: tlsCA, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-ca", - }, - Data: map[string][]byte{ - "caFile": []byte("invalid"), - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "failed to checkout and determine revision: unable to fetch-connect to remote '': PEM CA bundle could not be appended to x509 certificate pool"), - }, - }, - { - name: "SSH with private key secret makes ArtifactOutdated=True", - protocol: "ssh", - server: options{ - username: "git", - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "private-key", - }, - Data: map[string][]byte{ - "username": []byte("git"), - "identity": sshtestdata.PEMBytes["rsa"], - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), - }, - }, - { - name: "SSH with password protected private key secret makes ArtifactOutdated=True", - protocol: "ssh", - server: options{ - username: "git", - }, - secret: &corev1.Secret{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "private-key", - }, - Data: map[string][]byte{ - "username": []byte("git"), - "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes, - "password": []byte("password"), - }, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), - }, - }, - { - name: "Include get failure makes CheckoutFailed=True and returns error", - protocol: "http", - server: options{ - username: "git", - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "auth-strategy-", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(server.Root()) - server.AutoCreate() - - repoPath := "/test.git" - localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - if len(tt.server.username+tt.server.password) > 0 { - server.Auth(tt.server.username, tt.server.password) - } - - secret := tt.secret.DeepCopy() - switch tt.protocol { - case "http": - 
g.Expect(server.StartHTTP()).To(Succeed()) - defer server.StopHTTP() - obj.Spec.URL = server.HTTPAddress() + repoPath - case "https": - g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) - obj.Spec.URL = server.HTTPAddress() + repoPath - case "ssh": - server.KeyDir(filepath.Join(server.Root(), "keys")) - - g.Expect(server.ListenSSH()).To(Succeed()) - obj.Spec.URL = server.SSHAddress() + repoPath - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - if secret != nil && len(secret.Data["known_hosts"]) == 0 { - u, err := url.Parse(obj.Spec.URL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) - g.Expect(err).NotTo(HaveOccurred()) - secret.Data["known_hosts"] = knownHosts - } - default: - t.Fatalf("unsupported protocol %q", tt.protocol) - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - if secret != nil { - builder.WithObjects(secret.DeepCopy()) - } - - r := &GitRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - Libgit2TransportInitialized: managed.Enabled, - } - - for _, i := range testGitImplementations { - t.Run(i, func(t *testing.T) { - g := NewWithT(t) - - if tt.skipForImplementation == i { - t.Skipf("Skipped for Git implementation %q", i) - } - - tmpDir := t.TempDir() - - obj := obj.DeepCopy() - obj.Spec.GitImplementation = i - - head, _ := localRepo.Head() - assertConditions := tt.assertConditions - for k := range assertConditions { - assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", head.Hash().String()) - assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL) - } - - var commit git.Commit - var includes artifactSet - - 
got, err := r.reconcileSource(context.TODO(), obj, &commit, &includes, tmpDir) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - g.Expect(commit).ToNot(BeNil()) - }) - } - }) - } -} - -func TestGitRepositoryReconciler_reconcileSource_libgit2TransportUninitialized(t *testing.T) { - g := NewWithT(t) - - r := &GitRepositoryReconciler{ - Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - Libgit2TransportInitialized: mockTransportNotInitialized, - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "libgit2-transport", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - Reference: &sourcev1.GitRepositoryRef{ - Branch: git.DefaultBranch, - }, - GitImplementation: sourcev1.LibGit2Implementation, - }, - } - - tmpDir := t.TempDir() - var commit git.Commit - var includes artifactSet - _, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(BeAssignableToTypeOf(&serror.Stalling{})) - g.Expect(err.Error()).To(Equal("libgit2 managed transport not initialized")) -} - -func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) { - g := NewWithT(t) - - branches := []string{"staging"} - tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"} - - tests := []struct { - name string - skipForImplementation string - reference *sourcev1.GitRepositoryRef - beforeFunc func(obj *sourcev1.GitRepository, latestRev string) - want sreconcile.Result - wantErr bool - wantRevision string - wantArtifactOutdated bool - }{ - { - name: "Nil reference (default branch)", - want: sreconcile.ResultSuccess, - 
wantRevision: "master/", - wantArtifactOutdated: true, - }, - { - name: "Branch", - reference: &sourcev1.GitRepositoryRef{ - Branch: "staging", - }, - want: sreconcile.ResultSuccess, - wantRevision: "staging/", - wantArtifactOutdated: true, - }, - { - name: "Tag", - reference: &sourcev1.GitRepositoryRef{ - Tag: "v0.1.0", - }, - want: sreconcile.ResultSuccess, - wantRevision: "v0.1.0/", - wantArtifactOutdated: true, - }, - { - name: "Branch commit", - skipForImplementation: sourcev1.LibGit2Implementation, - reference: &sourcev1.GitRepositoryRef{ - Branch: "staging", - Commit: "", - }, - want: sreconcile.ResultSuccess, - wantRevision: "staging/", - wantArtifactOutdated: true, - }, - { - name: "Branch commit", - skipForImplementation: sourcev1.GoGitImplementation, - reference: &sourcev1.GitRepositoryRef{ - Branch: "staging", - Commit: "", - }, - want: sreconcile.ResultSuccess, - wantRevision: "HEAD/", - wantArtifactOutdated: true, - }, - { - name: "SemVer", - reference: &sourcev1.GitRepositoryRef{ - SemVer: "*", - }, - want: sreconcile.ResultSuccess, - wantRevision: "v2.0.0/", - wantArtifactOutdated: true, - }, - { - name: "SemVer range", - reference: &sourcev1.GitRepositoryRef{ - SemVer: "", - wantArtifactOutdated: true, - }, - { - name: "SemVer prerelease", - reference: &sourcev1.GitRepositoryRef{ - SemVer: ">=1.0.0-0 <1.1.0-0", - }, - wantRevision: "v1.0.0-alpha/", - want: sreconcile.ResultSuccess, - wantArtifactOutdated: true, - }, - { - name: "Optimized clone", - reference: &sourcev1.GitRepositoryRef{ - Branch: "staging", - }, - beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { - // Add existing artifact on the object and storage. - obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ - Revision: "staging/" + latestRev, - Path: randStringRunes(10), - }, - // Checksum with all the relevant fields unset. 
- ContentConfigChecksum: emptyContentConfigChecksum, - } - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo") - }, - want: sreconcile.ResultEmpty, - wantErr: true, - wantRevision: "staging/", - wantArtifactOutdated: false, - }, - { - name: "Optimized clone different ignore", - reference: &sourcev1.GitRepositoryRef{ - Branch: "staging", - }, - beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { - // Set new ignore value. - obj.Spec.Ignore = pointer.StringPtr("foo") - // Add existing artifact on the object and storage. - obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ - Revision: "staging/" + latestRev, - Path: randStringRunes(10), - }, - // Checksum with all the relevant fields unset. - ContentConfigChecksum: emptyContentConfigChecksum, - } - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo") - }, - want: sreconcile.ResultSuccess, - wantRevision: "staging/", - wantArtifactOutdated: false, - }, - } - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).To(BeNil()) - defer os.RemoveAll(server.Root()) - server.AutoCreate() - g.Expect(server.StartHTTP()).To(Succeed()) - defer server.StopHTTP() - - repoPath := "/test.git" - localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - headRef, err := localRepo.Head() - g.Expect(err).NotTo(HaveOccurred()) - - for _, branch := range branches { - g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed()) - } - for _, tag := range tags { - g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed()) - } - - r := &GitRepositoryReconciler{ - Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - Libgit2TransportInitialized: managed.Enabled, - } - - for _, tt := range tests { 
- t.Run(tt.name, func(t *testing.T) { - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "checkout-strategy-", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - URL: server.HTTPAddress() + repoPath, - Reference: tt.reference, - }, - } - - if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "" { - obj.Spec.Reference.Commit = headRef.Hash().String() - } - - for _, i := range testGitImplementations { - t.Run(i, func(t *testing.T) { - g := NewWithT(t) - - if tt.skipForImplementation == i { - t.Skipf("Skipped for Git implementation %q", i) - } - - tmpDir := t.TempDir() - - obj := obj.DeepCopy() - obj.Spec.GitImplementation = i - - if tt.beforeFunc != nil { - tt.beforeFunc(obj, headRef.Hash().String()) - } - - var commit git.Commit - var includes artifactSet - got, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir) - if err != nil { - println(err.Error()) - } - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - if tt.wantRevision != "" && !tt.wantErr { - revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String()) - g.Expect(commit.String()).To(Equal(revision)) - g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(Equal(tt.wantArtifactOutdated)) - } - }) - } - }) - } -} - -func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { - tests := []struct { - name string - dir string - includes artifactSet - beforeFunc func(obj *sourcev1.GitRepository) - afterFunc func(t *WithT, obj *sourcev1.GitRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "Archiving artifact to storage makes ArtifactInStorage=True", - dir: "testdata/git/repository", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { 
- t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.Status.URL).ToNot(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Archiving artifact to storage with includes makes ArtifactInStorage=True", - dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) - t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) - t.Expect(obj.Status.URL).ToNot(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Up-to-date artifact should not update status", - dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"} - obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision", Checksum: "some-checksum"}} - obj.Status.ContentConfigChecksum = "sha256:f825d11a1c5987e033d2cb36449a3b0435a6abc9b2bfdbcdcc7c49bf40e9285d" - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.Status.URL).To(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, 
meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Spec ignore overwrite is taken into account", - dir: "testdata/git/repository", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Spec.Ignore = pointer.StringPtr("!**.txt\n") - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd")) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "source ignore for subdir ignore patterns", - dir: "testdata/git/repowithsubdirs", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("29186e024dde5a414cfc990829c6b2e85f6b3bd2d950f50ca9f418f5d2261d79")) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Removes ArtifactOutdatedCondition after creating new artifact", - dir: "testdata/git/repository", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) - 
t.Expect(obj.Status.URL).ToNot(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Creates latest symlink to the created artifact", - dir: "testdata/git/repository", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - - localPath := testStorage.LocalPath(*obj.GetArtifact()) - symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") - targetFile, err := os.Readlink(symlinkPath) - t.Expect(err).NotTo(HaveOccurred()) - t.Expect(localPath).To(Equal(targetFile)) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), - }, - }, - { - name: "Target path does not exists", - dir: "testdata/git/foo", - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat target artifact path"), - }, - }, - { - name: "Target path is not a directory", - dir: "testdata/git/repository/foo.txt", - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "invalid target path"), - }, - }, - } - artifactSize := func(g *WithT, artifactURL string) *int64 { - if artifactURL == "" { - return nil - } - res, err := http.Get(artifactURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res.StatusCode).To(Equal(http.StatusOK)) - defer res.Body.Close() - return &res.ContentLength - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := 
NewWithT(t) - - resetChmod(tt.dir, 0o755, 0o644) - - r := &GitRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "reconcile-artifact-", - Generation: 1, - }, - Status: sourcev1.GitRepositoryStatus{}, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - commit := git.Commit{ - Hash: []byte("revision"), - Reference: "refs/heads/main", - } - - got, err := r.reconcileArtifact(ctx, obj, &commit, &tt.includes, tt.dir) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - if obj.Status.Artifact != nil { - g.Expect(obj.Status.Artifact.Size).To(Equal(artifactSize(g, obj.Status.Artifact.URL))) - } - - if tt.afterFunc != nil { - tt.afterFunc(g, obj) - } - }) - } -} - -func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { - g := NewWithT(t) - - server, err := testserver.NewTempArtifactServer() - g.Expect(err).NotTo(HaveOccurred()) - storage, err := newTestStorage(server.HTTPServer) - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(storage.BasePath) - - dependencyInterval := 5 * time.Second - - type dependency struct { - name string - withArtifact bool - conditions []metav1.Condition - } - - type include struct { - name string - fromPath string - toPath string - shouldExist bool - } - - tests := []struct { - name string - dependencies []dependency - includes []include - beforeFunc func(obj *sourcev1.GitRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "New includes make ArtifactOutdated=True", - dependencies: []dependency{ - { - name: "a", - withArtifact: true, - conditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), - }, - }, - { - name: "b", - withArtifact: true, - 
conditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), - }, - }, - }, - includes: []include{ - {name: "a", toPath: "a/", shouldExist: true}, - {name: "b", toPath: "b/", shouldExist: true}, - }, - want: sreconcile.ResultSuccess, - }, - { - name: "Invalid FromPath makes IncludeUnavailable=True and returns error", - dependencies: []dependency{ - { - name: "a", - withArtifact: true, - }, - }, - includes: []include{ - {name: "a", fromPath: "../../../path", shouldExist: false}, - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var depObjs []client.Object - for _, d := range tt.dependencies { - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: d.name, - }, - Status: sourcev1.GitRepositoryStatus{ - Conditions: d.conditions, - }, - } - if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: d.name + ".tar.gz", - Revision: d.name, - LastUpdateTime: metav1.Now(), - } - g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed()) - } - depObjs = append(depObjs, obj) - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - if len(tt.dependencies) > 0 { - builder.WithObjects(depObjs...) 
- } - - r := &GitRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: storage, - requeueDependency: dependencyInterval, - features: features.FeatureGates(), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "reconcile-include", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - }, - } - - for i, incl := range tt.includes { - incl := sourcev1.GitRepositoryInclude{ - GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, - FromPath: incl.fromPath, - ToPath: incl.toPath, - } - tt.includes[i].fromPath = incl.GetFromPath() - tt.includes[i].toPath = incl.GetToPath() - obj.Spec.Include = append(obj.Spec.Include, incl) - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - tmpDir := t.TempDir() - - var commit git.Commit - var includes artifactSet - - // Build includes artifactSet. - artifactSet, err := r.fetchIncludes(ctx, obj) - g.Expect(err).ToNot(HaveOccurred()) - includes = *artifactSet - - got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - g.Expect(err != nil).To(Equal(tt.wantErr)) - if err == nil { - g.Expect(len(includes)).To(Equal(len(tt.includes))) - } - g.Expect(got).To(Equal(tt.want)) - for _, i := range tt.includes { - if i.toPath != "" { - expect := g.Expect(filepath.Join(tmpDir, i.toPath)) - if i.shouldExist { - expect.To(BeADirectory()) - } else { - expect.NotTo(BeADirectory()) - } - } - if i.shouldExist { - g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory()) - } else { - g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory()) - } - } - }) - } -} - -func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.GitRepository, storage *Storage) error - want sreconcile.Result - wantErr bool - assertArtifact *sourcev1.Artifact - 
assertConditions []metav1.Condition - assertPaths []string - }{ - { - name: "garbage collects", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - revisions := []string{"a", "b", "c", "d"} - for n := range revisions { - v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), - Revision: v, - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { - return err - } - if n != len(revisions)-1 { - time.Sleep(time.Second * 1) - } - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/d.txt", - Revision: "d", - Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", - URL: testStorage.Hostname + "/reconcile-storage/d.txt", - Size: int64p(int64(len("d"))), - }, - assertPaths: []string{ - "/reconcile-storage/d.txt", - "/reconcile-storage/c.txt", - "!/reconcile-storage/b.txt", - "!/reconcile-storage/a.txt", - }, - want: sreconcile.ResultSuccess, - }, - { - name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/invalid.txt", - Revision: "e", - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "!/reconcile-storage/invalid.txt", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), - }, - }, - { - name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: 
"3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: "http://outdated.com/reconcile-storage/hostname.txt", - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { - return err - } - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "/reconcile-storage/hostname.txt", - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", - Size: int64p(int64(len("file"))), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - defer func() { - g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) - }() - - r := &GitRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - }, - } - if tt.beforeFunc != nil { - g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) - } - - var c *git.Commit - var as artifactSet - got, err := r.reconcileStorage(context.TODO(), obj, c, &as, "") - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) - if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { - g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) - } - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - for _, p := range tt.assertPaths { - absoluteP := filepath.Join(testStorage.BasePath, p) - if !strings.HasPrefix(p, "!") { - g.Expect(absoluteP).To(BeAnExistingFile()) - continue - } - 
g.Expect(absoluteP).NotTo(BeAnExistingFile()) - } - }) - } -} - -func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { - g := NewWithT(t) - - r := &GitRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "reconcile-delete-", - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{ - sourcev1.SourceFinalizer, - }, - }, - Status: sourcev1.GitRepositoryStatus{}, - } - - artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") - obj.Status.Artifact = &artifact - - got, err := r.reconcileDelete(ctx, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(sreconcile.ResultEmpty)) - g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) - g.Expect(obj.Status.Artifact).To(BeNil()) -} - -func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { - tests := []struct { - name string - secret *corev1.Secret - commit git.Commit - beforeFunc func(obj *sourcev1.GitRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "Valid commit makes SourceVerifiedCondition=True", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing", - }, - Data: map[string][]byte{ - "foo": []byte(armoredKeyRingFixture), - }, - }, - commit: git.Commit{ - Hash: []byte("shasum"), - Encoded: []byte(encodedCommitFixture), - Signature: signatureCommitFixture, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ - Mode: "head", - SecretRef: meta.LocalObjectReference{ - Name: "existing", - }, - } - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit 'shasum'"), - }, - }, - { - name: "Invalid commit sets no SourceVerifiedCondition and returns error", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "existing", - }, - }, - commit: git.Commit{ - Hash: []byte("shasum"), - Encoded: []byte(malformedEncodedCommitFixture), - Signature: signatureCommitFixture, - }, - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ - Mode: "head", - SecretRef: meta.LocalObjectReference{ - Name: "existing", - }, - } - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"), - }, - }, - { - name: "Secret get failure sets no SourceVerifiedCondition and returns error", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ - Mode: "head", - SecretRef: meta.LocalObjectReference{ - Name: "none-existing", - }, - } - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerificationError", "PGP public keys secret error: secrets \"none-existing\" not found"), - }, - }, - { - name: "Nil verification in spec deletes SourceVerified condition", - beforeFunc: func(obj *sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{}, - }, - { - name: "Empty verification mode in spec deletes SourceVerified condition", - beforeFunc: func(obj 
*sourcev1.GitRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - if tt.secret != nil { - builder.WithObjects(tt.secret) - } - - r := &GitRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Client: builder.Build(), - features: features.FeatureGates(), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "verify-commit-", - Generation: 1, - }, - Status: sourcev1.GitRepositoryStatus{}, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - got, err := r.verifyCommitSignature(context.TODO(), obj, tt.commit) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - }) - } -} - -func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(server.Root()) - server.AutoCreate() - g.Expect(server.StartHTTP()).To(Succeed()) - defer server.StopHTTP() - - repoPath := "/test.git" - _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - tests := []struct { - name string - beforeFunc func(obj *sourcev1.GitRepository) - want ctrl.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "no failure condition", - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), 
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - { - name: "reconciling condition", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - { - name: "stalled condition", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - { - name: "mixed failed conditions", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - { - name: "reconciling and failed conditions", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - 
assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - { - name: "stalled and failed conditions", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "condition-update", - Namespace: "default", - Finalizers: []string{sourcev1.SourceFinalizer}, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: server.HTTPAddress() + repoPath, - GitImplementation: sourcev1.GoGitImplementation, - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).WithObjects(obj) - - r := &GitRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - features: features.FeatureGates(), - Libgit2TransportInitialized: managed.Enabled, - } - - key := client.ObjectKeyFromObject(obj) - res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key}) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(res).To(Equal(tt.want)) - - updatedObj := &sourcev1.GitRepository{} - err = r.Get(ctx, key, updatedObj) - g.Expect(err).NotTo(HaveOccurred()) - 
g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -// helpers - -func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) { - fs := memfs.New() - repo, err := gogit.Init(memory.NewStorage(), fs) - if err != nil { - return nil, err - } - - branchRef := plumbing.NewBranchReferenceName(branch) - if err = repo.CreateBranch(&config.Branch{ - Name: branch, - Remote: gogit.DefaultRemoteName, - Merge: branchRef, - }); err != nil { - return nil, err - } - - err = commitFromFixture(repo, fixture) - if err != nil { - return nil, err - } - - if server.HTTPAddress() == "" { - if err = server.StartHTTP(); err != nil { - return nil, err - } - defer server.StopHTTP() - } - if _, err = repo.CreateRemote(&config.RemoteConfig{ - Name: gogit.DefaultRemoteName, - URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath}, - }); err != nil { - return nil, err - } - - if err = repo.Push(&gogit.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"}, - }); err != nil { - return nil, err - } - - return repo, nil -} - -func Test_commitFromFixture(t *testing.T) { - g := NewWithT(t) - - repo, err := gogit.Init(memory.NewStorage(), memfs.New()) - g.Expect(err).ToNot(HaveOccurred()) - - err = commitFromFixture(repo, "testdata/git/repository") - g.Expect(err).ToNot(HaveOccurred()) -} - -func commitFromFixture(repo *gogit.Repository, fixture string) error { - working, err := repo.Worktree() - if err != nil { - return err - } - fs := working.Filesystem - - if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode()) - } - - fileBytes, err := os.ReadFile(path) - if err != nil { - return err - } - - ff, err := fs.Create(path[len(fixture):]) - if err != nil { - return err - } - defer ff.Close() - - _, err = 
ff.Write(fileBytes) - return err - }); err != nil { - return err - } - - _, err = working.Add(".") - if err != nil { - return err - } - - if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{ - Author: &object.Signature{ - Name: "Jane Doe", - Email: "jane@example.com", - When: time.Now(), - }, - }); err != nil { - return err - } - - return nil -} - -func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error { - refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch) - return repo.Push(&gogit.PushOptions{ - RemoteName: "origin", - RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, - Force: true, - }) -} - -func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error { - if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{ - // Not setting this seems to make things flaky - // Expected success, but got an error: - // <*errors.errorString | 0xc0000f6350>: { - // s: "tagger field is required", - // } - // tagger field is required - Tagger: &object.Signature{ - Name: "Jane Doe", - Email: "jane@example.com", - When: time.Now(), - }, - Message: tag, - }); err != nil { - return err - } - refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag) - return repo.Push(&gogit.PushOptions{ - RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, - }) -} - -func TestGitRepositoryReconciler_statusConditions(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.GitRepository) - assertConditions []metav1.Condition - }{ - { - name: "multiple positive conditions", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, 
meta.SucceededReason, "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit"), - }, - }, - { - name: "multiple failures", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error") - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error"), - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), - }, - }, - { - name: "mixed positive and negative conditions", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.GitRepository{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.GitRepositoryKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "gitrepo", - Namespace: "foo", - }, - } - clientBuilder := fake.NewClientBuilder() - clientBuilder.WithObjects(obj) - c := clientBuilder.Build() - - patchHelper, err := patch.NewHelper(obj, c) - g.Expect(err).ToNot(HaveOccurred()) - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - recResult := sreconcile.ResultSuccess - var retErr error - - summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(gitRepositoryReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner("source-controller"), - } - _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
- - key := client.ObjectKeyFromObject(obj) - g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestGitRepositoryReconciler_notify(t *testing.T) { - concreteCommit := git.Commit{ - Hash: git.Hash("some-hash"), - Message: "test commit", - Encoded: []byte("content"), - } - partialCommit := git.Commit{ - Hash: git.Hash("some-hash"), - } - - noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason") - noopErr.Ignore = true - - tests := []struct { - name string - res sreconcile.Result - resErr error - oldObjBeforeFunc func(obj *sourcev1.GitRepository) - newObjBeforeFunc func(obj *sourcev1.GitRepository) - commit git.Commit - wantEvent string - }{ - { - name: "error - no event", - res: sreconcile.ResultEmpty, - resErr: errors.New("some error"), - }, - { - name: "new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - }, - commit: concreteCommit, - wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", - }, - { - name: "recovery from failure", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - commit: concreteCommit, - wantEvent: "Normal Succeeded stored artifact for commit 'test commit'", - }, - { - name: "recovery and new artifact", - res: sreconcile.ResultSuccess, - 
resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - commit: concreteCommit, - wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", - }, - { - name: "no updates", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - }, - { - name: "no-op error result", - res: sreconcile.ResultEmpty, - resErr: noopErr, - oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - commit: partialCommit, // no-op will always result in partial commit. 
- wantEvent: "Normal Succeeded stored artifact for commit 'HEAD/some-hash'", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - recorder := record.NewFakeRecorder(32) - - oldObj := &sourcev1.GitRepository{} - newObj := oldObj.DeepCopy() - - if tt.oldObjBeforeFunc != nil { - tt.oldObjBeforeFunc(oldObj) - } - if tt.newObjBeforeFunc != nil { - tt.newObjBeforeFunc(newObj) - } - - reconciler := &GitRepositoryReconciler{ - EventRecorder: recorder, - features: features.FeatureGates(), - } - reconciler.notify(ctx, oldObj, newObj, tt.commit, tt.res, tt.resErr) - - select { - case x, ok := <-recorder.Events: - g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") - if tt.wantEvent != "" { - g.Expect(x).To(ContainSubstring(tt.wantEvent)) - } - default: - if tt.wantEvent != "" { - t.Errorf("expected some event to be emitted") - } - } - }) - } -} - -func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { - type dependency struct { - name string - withArtifact bool - conditions []metav1.Condition - } - - type include struct { - name string - fromPath string - toPath string - shouldExist bool - } - - tests := []struct { - name string - dependencies []dependency - includes []include - beforeFunc func(obj *sourcev1.GitRepository) - wantErr bool - wantArtifactSet artifactSet - assertConditions []metav1.Condition - }{ - { - name: "Existing includes", - dependencies: []dependency{ - { - name: "a", - withArtifact: true, - conditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), - }, - }, - { - name: "b", - withArtifact: true, - conditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), - }, - }, - }, - includes: []include{ - {name: "a", toPath: "a/", shouldExist: true}, - {name: "b", toPath: "b/", shouldExist: true}, - }, - wantErr: false, - wantArtifactSet: []*sourcev1.Artifact{ - {Revision: "a"}, - {Revision: "b"}, - }, - }, 
- { - name: "Include get failure", - includes: []include{ - {name: "a", toPath: "a/"}, - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), - }, - }, - { - name: "Include without an artifact makes IncludeUnavailable=True", - dependencies: []dependency{ - { - name: "a", - withArtifact: false, - conditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"), - }, - }, - }, - includes: []include{ - {name: "a", toPath: "a/"}, - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"), - }, - }, - { - name: "Outdated IncludeUnavailable is removed", - beforeFunc: func(obj *sourcev1.GitRepository) { - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") - }, - assertConditions: []metav1.Condition{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var depObjs []client.Object - for _, d := range tt.dependencies { - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: d.name, - }, - Status: sourcev1.GitRepositoryStatus{ - Conditions: d.conditions, - }, - } - if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: d.name + ".tar.gz", - Revision: d.name, - LastUpdateTime: metav1.Now(), - } - } - depObjs = append(depObjs, obj) - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - if len(tt.dependencies) > 0 { - builder.WithObjects(depObjs...) 
- } - - r := &GitRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - } - - obj := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "reconcile-include", - }, - Spec: sourcev1.GitRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - }, - } - - for i, incl := range tt.includes { - incl := sourcev1.GitRepositoryInclude{ - GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, - FromPath: incl.fromPath, - ToPath: incl.toPath, - } - tt.includes[i].fromPath = incl.GetFromPath() - tt.includes[i].toPath = incl.GetToPath() - obj.Spec.Include = append(obj.Spec.Include, incl) - } - - gotArtifactSet, err := r.fetchIncludes(ctx, obj) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - if !tt.wantErr && gotArtifactSet != nil { - g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse()) - } - }) - } -} - -func TestGitRepositoryReconciler_calculateContentConfigChecksum(t *testing.T) { - g := NewWithT(t) - obj := &sourcev1.GitRepository{} - r := &GitRepositoryReconciler{} - - emptyChecksum := r.calculateContentConfigChecksum(obj, nil) - g.Expect(emptyChecksum).To(Equal(emptyContentConfigChecksum)) - - // Ignore modified. - obj.Spec.Ignore = pointer.String("some-rule") - ignoreModChecksum := r.calculateContentConfigChecksum(obj, nil) - g.Expect(emptyChecksum).ToNot(Equal(ignoreModChecksum)) - - // Recurse submodules modified. - obj.Spec.RecurseSubmodules = true - submodModChecksum := r.calculateContentConfigChecksum(obj, nil) - g.Expect(ignoreModChecksum).ToNot(Equal(submodModChecksum)) - - // Include modified. 
- obj.Spec.Include = []sourcev1.GitRepositoryInclude{ - { - GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, - FromPath: "aaa", - ToPath: "bbb", - }, - } - artifacts := &artifactSet{ - &sourcev1.Artifact{Revision: "some-revision-1", Checksum: "some-checksum-1"}, - } - includeModChecksum := r.calculateContentConfigChecksum(obj, artifacts) - g.Expect(submodModChecksum).ToNot(Equal(includeModChecksum)) - - // Artifact modified revision. - artifacts = &artifactSet{ - &sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-1"}, - } - artifactModChecksum := r.calculateContentConfigChecksum(obj, artifacts) - g.Expect(includeModChecksum).ToNot(Equal(artifactModChecksum)) - - // Artifact modified checksum. - artifacts = &artifactSet{ - &sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-2"}, - } - artifactCsumModChecksum := r.calculateContentConfigChecksum(obj, artifacts) - g.Expect(artifactModChecksum).ToNot(Equal(artifactCsumModChecksum)) -} - -func resetChmod(path string, dirMode os.FileMode, fileMode os.FileMode) error { - err := filepath.Walk(path, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() && info.Mode() != dirMode { - os.Chmod(path, dirMode) - } else if !info.IsDir() && info.Mode() != fileMode { - os.Chmod(path, fileMode) - } - return nil - }) - if err != nil { - return fmt.Errorf("cannot reset file permissions: %v", err) - } - - return nil -} diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go deleted file mode 100644 index 26e771c5a..000000000 --- a/controllers/helmchart_controller_test.go +++ /dev/null @@ -1,2047 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - "time" - - "github.com/darkowlzz/controller-check/status" - . "github.com/onsi/gomega" - hchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - helmreg "helm.sh/helm/v3/pkg/registry" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/testserver" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" - "github.com/fluxcd/source-controller/internal/helm/chart" - "github.com/fluxcd/source-controller/internal/helm/registry" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" -) - -func TestHelmChartReconciler_Reconcile(t 
*testing.T) { - g := NewWithT(t) - - const ( - chartName = "helmchart" - chartVersion = "0.2.0" - chartPath = "testdata/charts/helmchart" - ) - - serverFactory, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(serverFactory.Root()) - - g.Expect(serverFactory.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed()) - g.Expect(serverFactory.GenerateIndex()).To(Succeed()) - - tests := []struct { - name string - beforeFunc func(repository *sourcev1.HelmRepository) - assertFunc func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) - }{ - { - name: "Reconciles chart build", - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmChart to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) || obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. 
- patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - // Check if the cache contains the index. - repoKey := client.ObjectKey{Name: repository.Name, Namespace: repository.Namespace} - err = testEnv.Get(ctx, repoKey, repository) - g.Expect(err).ToNot(HaveOccurred()) - localPath := testStorage.LocalPath(*repository.GetArtifact()) - _, found := testCache.Get(localPath) - g.Expect(found).To(BeTrue()) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmChart to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }, - }, - { - name: "Stalling on invalid repository URL", - beforeFunc: func(repository *sourcev1.HelmRepository) { - repository.Spec.URL = "://unsupported" // Invalid URL - }, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - // Wait for HelmChart to be FetchFailed == true - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { - return false - } - // observedGeneration is -1 because we have no successful reconciliation - return obj.Status.ObservedGeneration == -1 - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. 
- condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmChart to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }, - }, - { - name: "Stalling on invalid oci repository URL", - beforeFunc: func(repository *sourcev1.HelmRepository) { - repository.Spec.URL = strings.Replace(repository.Spec.URL, "http", "oci", 1) - }, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - // Wait for HelmChart to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { - return false - } - // observedGeneration is -1 because we have no successful reconciliation - return obj.Status.ObservedGeneration == -1 - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. 
- condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmChart to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - server := testserver.NewHTTPServer(serverFactory.Root()) - server.Start() - defer server.Stop() - - ns, err := testEnv.CreateNamespace(ctx, "helmchart") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - repository := sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - Namespace: ns.Name, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: server.URL(), - }, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(&repository) - } - - g.Expect(testEnv.CreateAndWait(ctx, &repository)).To(Succeed()) - - obj := sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-reconcile-", - Namespace: ns.Name, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: chartName, - Version: chartVersion, - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repository.Name, - }, - }, - } - g.Expect(testEnv.Create(ctx, &obj)).To(Succeed()) - - if tt.assertFunc != nil { - tt.assertFunc(g, &obj, &repository) - } - }) - } -} - -func TestHelmChartReconciler_reconcileStorage(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.HelmChart, storage *Storage) error - want sreconcile.Result - wantErr bool - assertArtifact *sourcev1.Artifact - assertConditions []metav1.Condition - assertPaths []string - }{ - { - name: "garbage collects", - beforeFunc: func(obj 
*sourcev1.HelmChart, storage *Storage) error { - revisions := []string{"a", "b", "c", "d"} - for n := range revisions { - v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), - Revision: v, - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { - return err - } - if n != len(revisions)-1 { - time.Sleep(time.Second * 1) - } - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/d.txt", - Revision: "d", - Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", - URL: testStorage.Hostname + "/reconcile-storage/d.txt", - Size: int64p(int64(len("d"))), - }, - assertPaths: []string{ - "/reconcile-storage/d.txt", - "/reconcile-storage/c.txt", - "!/reconcile-storage/b.txt", - "!/reconcile-storage/a.txt", - }, - want: sreconcile.ResultSuccess, - }, - { - name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/invalid.txt", - Revision: "d", - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "!/reconcile-storage/invalid.txt", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), - }, - }, - { - name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: "http://outdated.com/reconcile-storage/hostname.txt", - } - if err := 
testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { - return err - } - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "/reconcile-storage/hostname.txt", - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", - Size: int64p(int64(len("file"))), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - defer func() { - g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) - }() - - r := &HelmChartReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - }, - } - if tt.beforeFunc != nil { - g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) - } - - got, err := r.reconcileStorage(context.TODO(), obj, nil) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) - if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { - g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) - } - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - for _, p := range tt.assertPaths { - absoluteP := filepath.Join(testStorage.BasePath, p) - if !strings.HasPrefix(p, "!") { - g.Expect(absoluteP).To(BeAnExistingFile()) - continue - } - g.Expect(absoluteP).NotTo(BeAnExistingFile()) - } - }) - } -} - -func TestHelmChartReconciler_reconcileSource(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) - 
g.Expect(err).ToNot(HaveOccurred()) - - gitArtifact := &sourcev1.Artifact{ - Revision: "mock-ref/abcdefg12345678", - Path: "mock.tgz", - } - g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) - - tests := []struct { - name string - source sourcev1.Source - beforeFunc func(obj *sourcev1.HelmChart) - want sreconcile.Result - wantErr error - assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) - cleanFunc func(g *WithT, build *chart.Build) - }{ - { - name: "Observes Artifact revision and build result", - source: &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gitrepository", - Namespace: "default", - }, - Status: sourcev1.GitRepositoryStatus{ - Artifact: gitArtifact, - }, - }, - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ - Name: "gitrepository", - Kind: sourcev1.GitRepositoryKind, - } - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { - g.Expect(build.Complete()).To(BeTrue()) - g.Expect(build.Name).To(Equal("helmchart")) - g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).To(BeARegularFile()) - - g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"), - })) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Error on unavailable source", - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ - Name: "unavailable", - Kind: sourcev1.GitRepositoryKind, - } - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Event{Err: 
errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, - assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"), - })) - }, - }, - { - name: "Stalling on unsupported source kind", - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ - Name: "unavailable", - Kind: "Unsupported", - } - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, - assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"), - })) - }, - }, - { - name: "Stalling on persistent build error", - source: &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gitrepository", - Namespace: "default", - }, - Status: sourcev1.GitRepositoryStatus{ - Artifact: gitArtifact, - }, - }, - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ - Name: "gitrepository", - Kind: sourcev1.GitRepositoryKind, - } - obj.Spec.ValuesFiles = []string{"invalid.yaml"} - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, - assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { - g.Expect(build.Complete()).To(BeFalse()) - - 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"), - })) - }, - }, - { - name: "ResultRequeue when source artifact is unavailable", - source: &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gitrepository", - Namespace: "default", - }, - Status: sourcev1.GitRepositoryStatus{}, - }, - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ - Name: "gitrepository", - Kind: sourcev1.GitRepositoryKind, - } - obj.Status.ObservedSourceArtifactRevision = "foo" - }, - want: sreconcile.ResultRequeue, - assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"), - })) - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - clientBuilder := fake.NewClientBuilder() - if tt.source != nil { - clientBuilder.WithRuntimeObjects(tt.source) - } - - r := &HelmChartReconciler{ - Client: clientBuilder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: storage, - } - - obj := sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: "chart", - Namespace: "default", - }, - Spec: sourcev1.HelmChartSpec{}, - } - if tt.beforeFunc != nil { - tt.beforeFunc(&obj) - } - - var b chart.Build - if tt.cleanFunc != nil { - defer tt.cleanFunc(g, &b) - } - - got, err := r.reconcileSource(context.TODO(), &obj, &b) - - g.Expect(err != nil).To(Equal(tt.wantErr != nil)) - if tt.wantErr != nil { - 
g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) - } - g.Expect(got).To(Equal(tt.want)) - - if tt.assertFunc != nil { - tt.assertFunc(g, b, obj) - } - }) - } -} - -func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { - g := NewWithT(t) - - const ( - chartName = "helmchart" - chartVersion = "0.2.0" - higherChartVersion = "0.3.0" - chartPath = "testdata/charts/helmchart" - ) - - serverFactory, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(serverFactory.Root()) - - for _, ver := range []string{chartVersion, higherChartVersion} { - g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed()) - } - g.Expect(serverFactory.GenerateIndex()).To(Succeed()) - - type options struct { - username string - password string - } - - tests := []struct { - name string - server options - secret *corev1.Secret - beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) - want sreconcile.Result - wantErr error - assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) - cleanFunc func(g *WithT, build *chart.Build) - }{ - { - name: "Reconciles chart build", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = "helmchart" - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(chartName)) - g.Expect(build.Version).To(Equal(higherChartVersion)) - g.Expect(build.Path).ToNot(BeEmpty()) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Reconciles chart build with repository credentials", - server: options{ - username: "foo", - password: "bar", - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "auth", 
- }, - Data: map[string][]byte{ - "username": []byte("foo"), - "password": []byte("bar"), - }, - }, - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = chartName - obj.Spec.Version = chartVersion - repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(chartName)) - g.Expect(build.Version).To(Equal(chartVersion)) - g.Expect(build.Path).ToNot(BeEmpty()) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Uses artifact as build cache", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = chartName - obj.Spec.Version = chartVersion - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(chartName)) - g.Expect(build.Version).To(Equal(chartVersion)) - g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) - g.Expect(build.Path).To(BeARegularFile()) - }, - }, - { - name: "Sets Generation as VersionMetadata with values files", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = chartName - obj.Generation = 3 - obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(chartName)) - g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) - g.Expect(build.Path).ToNot(BeEmpty()) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - 
g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Forces build on generation change", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Generation = 3 - obj.Spec.Chart = chartName - obj.Spec.Version = chartVersion - - obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(chartName)) - g.Expect(build.Version).To(Equal(chartVersion)) - g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Event on unsuccessful secret retrieval", - beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - repository.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "invalid", - } - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")}, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"), - })) - }, - }, - { - name: "Stalling on invalid client options", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - repository.Spec.URL = "file://unsupported" // Unsupported protocol - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - - 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), - })) - }, - }, - { - name: "Stalling on invalid repository URL", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - repository.Spec.URL = "://unsupported" // Invalid URL - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"), - })) - }, - }, - { - name: "BuildError on temporary build error", - beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { - obj.Spec.Chart = "invalid" - }, - want: sreconcile.ResultEmpty, - wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - server := testserver.NewHTTPServer(serverFactory.Root()) - server.Start() - defer server.Stop() - - if len(tt.server.username+tt.server.password) > 0 { - server.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || u != tt.server.username || p != tt.server.password { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - } - - clientBuilder := fake.NewClientBuilder() - if tt.secret != nil { - clientBuilder.WithObjects(tt.secret.DeepCopy()) - } - - storage, err := newTestStorage(server) - g.Expect(err).ToNot(HaveOccurred()) - - r := &HelmChartReconciler{ - Client: clientBuilder.Build(), - EventRecorder: 
record.NewFakeRecorder(32), - Getters: testGetters, - Storage: storage, - } - - repository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: server.URL(), - Timeout: &metav1.Duration{Duration: timeout}, - }, - Status: sourcev1.HelmRepositoryStatus{ - Artifact: &sourcev1.Artifact{ - Path: "index.yaml", - }, - }, - } - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - }, - Spec: sourcev1.HelmChartSpec{}, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj, repository) - } - - var b chart.Build - if tt.cleanFunc != nil { - defer tt.cleanFunc(g, &b) - } - got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) - - g.Expect(err != nil).To(Equal(tt.wantErr != nil)) - if tt.wantErr != nil { - g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) - } - g.Expect(got).To(Equal(tt.want)) - - if tt.assertFunc != nil { - tt.assertFunc(g, obj, b) - } - }) - } -} - -func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - - const ( - chartPath = "testdata/charts/helmchart-0.1.0.tgz" - ) - - // Login to the registry - err := testRegistryServer.registryClient.Login(testRegistryServer.registryHost, - helmreg.LoginOptBasicAuth(testRegistryUsername, testRegistryPassword), - helmreg.LoginOptInsecure(true)) - g.Expect(err).NotTo(HaveOccurred()) - - // Load a test chart - chartData, err := ioutil.ReadFile(chartPath) - g.Expect(err).NotTo(HaveOccurred()) - metadata, err := extractChartMeta(chartData) - g.Expect(err).NotTo(HaveOccurred()) - - // Upload the test chart - ref := fmt.Sprintf("%s/testrepo/%s:%s", testRegistryServer.registryHost, metadata.Name, metadata.Version) - _, err = testRegistryServer.registryClient.Push(chartData, ref) - 
g.Expect(err).NotTo(HaveOccurred()) - - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) - g.Expect(err).ToNot(HaveOccurred()) - - cachedArtifact := &sourcev1.Artifact{ - Revision: "0.1.0", - Path: metadata.Name + "-" + metadata.Version + ".tgz", - } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) - - tests := []struct { - name string - secret *corev1.Secret - beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) - want sreconcile.Result - wantErr error - assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) - cleanFunc func(g *WithT, build *chart.Build) - }{ - { - name: "Reconciles chart build with docker repository credentials", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "auth", - }, - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - ".dockerconfigjson": []byte(`{"auths":{"` + - testRegistryServer.registryHost + `":{"` + - `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`), - }, - }, - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = metadata.Name - obj.Spec.Version = metadata.Version - repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(metadata.Name)) - g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).ToNot(BeEmpty()) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Reconciles chart build with repository credentials", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "auth", - }, - Data: map[string][]byte{ - "username": []byte(testRegistryUsername), - "password": 
[]byte(testRegistryPassword), - }, - }, - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = metadata.Name - obj.Spec.Version = metadata.Version - repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(metadata.Name)) - g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).ToNot(BeEmpty()) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Uses artifact as build cache", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Spec.Chart = metadata.Name - obj.Spec.Version = metadata.Version - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(metadata.Name)) - g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) - g.Expect(build.Path).To(BeARegularFile()) - }, - }, - { - name: "Forces build on generation change", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - obj.Generation = 3 - obj.Spec.Chart = metadata.Name - obj.Spec.Version = metadata.Version - - obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Name).To(Equal(metadata.Name)) - g.Expect(build.Version).To(Equal(metadata.Version)) - fmt.Println("buildpath", build.Path) - fmt.Println("storage Path", 
storage.LocalPath(*cachedArtifact.DeepCopy())) - g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Event on unsuccessful secret retrieval", - beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - repository.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "invalid", - } - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")}, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"), - })) - }, - }, - { - name: "Stalling on invalid client options", - beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { - repository.Spec.URL = "https://unsupported" // Unsupported protocol - }, - want: sreconcile.ResultEmpty, - wantErr: &serror.Stalling{Err: errors.New("failed to construct Helm client: invalid OCI registry URL: https://unsupported")}, - assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), - })) - }, - }, - { - name: "BuildError on temporary build error", - beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { - obj.Spec.Chart = "invalid" - }, - want: sreconcile.ResultEmpty, - wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, - }, - } - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - clientBuilder := fake.NewClientBuilder() - if tt.secret != nil { - clientBuilder.WithObjects(tt.secret.DeepCopy()) - } - - r := &HelmChartReconciler{ - Client: clientBuilder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Getters: testGetters, - Storage: storage, - RegistryClientGenerator: registry.ClientGenerator, - } - - repository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost), - Timeout: &metav1.Duration{Duration: timeout}, - Type: sourcev1.HelmRepositoryTypeOCI, - }, - } - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - }, - Spec: sourcev1.HelmChartSpec{}, - } - - if tt.beforeFunc != nil { - tt.beforeFunc(obj, repository) - } - - var b chart.Build - if tt.cleanFunc != nil { - defer tt.cleanFunc(g, &b) - } - got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) - - g.Expect(err != nil).To(Equal(tt.wantErr != nil)) - if tt.wantErr != nil { - g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) - } - g.Expect(got).To(Equal(tt.want)) - - if tt.assertFunc != nil { - tt.assertFunc(g, obj, b) - } - }) - } -} - -func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) - g.Expect(err).ToNot(HaveOccurred()) - - chartsArtifact := &sourcev1.Artifact{ - Revision: "mock-ref/abcdefg12345678", - Path: "mock.tgz", - } - g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) - yamlArtifact := &sourcev1.Artifact{ - Revision: "9876abcd", - Path: "values.yaml", - } - g.Expect(storage.CopyFromPath(yamlArtifact, 
"testdata/charts/helmchart/values.yaml")).To(Succeed()) - cachedArtifact := &sourcev1.Artifact{ - Revision: "0.1.0", - Path: "cached.tgz", - } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) - - tests := []struct { - name string - source sourcev1.Artifact - beforeFunc func(obj *sourcev1.HelmChart) - want sreconcile.Result - wantErr error - assertFunc func(g *WithT, build chart.Build) - cleanFunc func(g *WithT, build *chart.Build) - }{ - { - name: "Resolves chart dependencies and builds", - source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchartwithdeps" - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Name).To(Equal("helmchartwithdeps")) - g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.ResolvedDependencies).To(Equal(4)) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "ReconcileStrategyRevision sets VersionMetadata", - source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchart" - obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind - obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Name).To(Equal("helmchart")) - g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345")) - g.Expect(build.ResolvedDependencies).To(Equal(0)) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "ValuesFiles sets Generation as VersionMetadata", - source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Generation = 3 - obj.Spec.Chart = 
"testdata/charts/helmchart" - obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind - obj.Spec.ValuesFiles = []string{ - filepath.Join(obj.Spec.Chart, "values.yaml"), - filepath.Join(obj.Spec.Chart, "override.yaml"), - } - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Name).To(Equal("helmchart")) - g.Expect(build.Version).To(Equal("0.1.0+3")) - g.Expect(build.ResolvedDependencies).To(Equal(0)) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Chart from storage cache", - source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Status.Artifact = cachedArtifact.DeepCopy() - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Name).To(Equal("helmchart")) - g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) - g.Expect(build.Path).To(BeARegularFile()) - }, - }, - { - name: "Generation change forces rebuild", - source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Generation = 2 - obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Status.Artifact = cachedArtifact.DeepCopy() - obj.Status.ObservedGeneration = 1 - }, - want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Name).To(Equal("helmchart")) - g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) - g.Expect(build.Path).To(BeARegularFile()) - }, - cleanFunc: func(g *WithT, build *chart.Build) { - g.Expect(os.Remove(build.Path)).To(Succeed()) - }, - }, - { - name: "Empty source artifact", - source: sourcev1.Artifact{}, - want: sreconcile.ResultEmpty, - wantErr: 
&serror.Event{Err: errors.New("no such file or directory")}, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - }, - }, - { - name: "Invalid artifact type", - source: *yamlArtifact, - want: sreconcile.ResultEmpty, - wantErr: &serror.Event{Err: errors.New("artifact untar error: requires gzip-compressed body")}, - assertFunc: func(g *WithT, build chart.Build) { - g.Expect(build.Complete()).To(BeFalse()) - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmChartReconciler{ - Client: fake.NewClientBuilder().Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: storage, - Getters: testGetters, - RegistryClientGenerator: registry.ClientGenerator, - } - - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: "artifact", - Namespace: "default", - }, - Spec: sourcev1.HelmChartSpec{}, - } - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - var b chart.Build - if tt.cleanFunc != nil { - defer tt.cleanFunc(g, &b) - } - - got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b) - g.Expect(err != nil).To(Equal(tt.wantErr != nil)) - if tt.wantErr != nil { - g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) - } - g.Expect(got).To(Equal(tt.want)) - - if tt.assertFunc != nil { - tt.assertFunc(g, b) - } - }) - } -} - -func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { - tests := []struct { - name string - build *chart.Build - beforeFunc func(obj *sourcev1.HelmChart) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - afterFunc func(t *WithT, obj *sourcev1.HelmChart) - }{ - { - name: "Incomplete build requeues and does not update status", - build: &chart.Build{}, - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - want: 
sreconcile.ResultRequeue, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""), - }, - }, - { - name: "Copying artifact to storage from build makes ArtifactInStorage=True", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) - t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) - t.Expect(obj.Status.URL).ToNot(BeEmpty()) - t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), - }, - }, - { - name: "Up-to-date chart build does not persist artifact to storage", - build: &chart.Build{ - Name: "helmchart", - Version: "0.1.0", - Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), - }, - beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "testdata/charts/helmchart-0.1.0.tgz", - } - }, - want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) - t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) - t.Expect(obj.Status.URL).To(BeEmpty()) - }, - }, - { - name: "Restores conditions in case artifact matches current chart build", - build: &chart.Build{ - Name: "helmchart", - Version: "0.1.0", - Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), - Packaged: true, - }, - beforeFunc: func(obj 
*sourcev1.HelmChart) { - obj.Status.ObservedChartName = "helmchart" - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: "0.1.0", - Path: "testdata/charts/helmchart-0.1.0.tgz", - } - }, - want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) - t.Expect(obj.Status.URL).To(BeEmpty()) - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), - }, - }, - { - name: "Removes ArtifactOutdatedCondition after creating new artifact", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) - t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) - t.Expect(obj.Status.URL).ToNot(BeEmpty()) - t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), - }, - }, - { - name: "Creates latest symlink to the created artifact", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { - t.Expect(obj.GetArtifact()).ToNot(BeNil()) - - localPath := testStorage.LocalPath(*obj.GetArtifact()) - symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") - targetFile, err := os.Readlink(symlinkPath) - t.Expect(err).NotTo(HaveOccurred()) - 
t.Expect(localPath).To(Equal(targetFile)) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmChartReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "reconcile-artifact-", - Generation: 1, - }, - Status: sourcev1.HelmChartStatus{}, - } - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - got, err := r.reconcileArtifact(ctx, obj, tt.build) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - if tt.afterFunc != nil { - tt.afterFunc(g, obj) - } - }) - } -} - -func TestHelmChartReconciler_getHelmRepositorySecret(t *testing.T) { - mock := &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "secret", - Namespace: "foo", - }, - Data: map[string][]byte{ - "key": []byte("bar"), - }, - } - clientBuilder := fake.NewClientBuilder() - clientBuilder.WithObjects(mock) - - r := &HelmChartReconciler{ - Client: clientBuilder.Build(), - } - - tests := []struct { - name string - repository *sourcev1.HelmRepository - want *corev1.Secret - wantErr bool - }{ - { - name: "Existing secret reference", - repository: &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: mock.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - SecretRef: &meta.LocalObjectReference{ - Name: mock.Name, - }, - }, - }, - want: mock, - }, - { - name: "Empty secret reference", - repository: &sourcev1.HelmRepository{ - Spec: sourcev1.HelmRepositorySpec{ - SecretRef: nil, - }, - }, - want: nil, - }, - { 
- name: "Error on client error", - repository: &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "different", - }, - Spec: sourcev1.HelmRepositorySpec{ - SecretRef: &meta.LocalObjectReference{ - Name: mock.Name, - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - got, err := r.getHelmRepositorySecret(context.TODO(), tt.repository) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - }) - } -} - -func TestHelmChartReconciler_getSource(t *testing.T) { - mocks := []client.Object{ - &sourcev1.HelmRepository{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.HelmRepositoryKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "helmrepository", - Namespace: "foo", - }, - }, - &sourcev1.GitRepository{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.GitRepositoryKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "gitrepository", - Namespace: "foo", - }, - }, - &sourcev1.Bucket{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.BucketKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "bucket", - Namespace: "foo", - }, - }, - } - clientBuilder := fake.NewClientBuilder() - clientBuilder.WithObjects(mocks...) 
- - r := &HelmChartReconciler{ - Client: clientBuilder.Build(), - } - - tests := []struct { - name string - obj *sourcev1.HelmChart - want sourcev1.Source - wantErr bool - }{ - { - name: "Get HelmRepository source for reference", - obj: &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: mocks[0].GetNamespace(), - }, - Spec: sourcev1.HelmChartSpec{ - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Name: mocks[0].GetName(), - Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, - }, - }, - }, - want: mocks[0].(sourcev1.Source), - }, - { - name: "Get GitRepository source for reference", - obj: &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: mocks[1].GetNamespace(), - }, - Spec: sourcev1.HelmChartSpec{ - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Name: mocks[1].GetName(), - Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, - }, - }, - }, - want: mocks[1].(sourcev1.Source), - }, - { - name: "Get Bucket source for reference", - obj: &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: mocks[2].GetNamespace(), - }, - Spec: sourcev1.HelmChartSpec{ - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Name: mocks[2].GetName(), - Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, - }, - }, - }, - want: mocks[2].(sourcev1.Source), - }, - { - name: "Error on client error", - obj: &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: mocks[2].GetNamespace(), - }, - Spec: sourcev1.HelmChartSpec{ - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Name: mocks[1].GetName(), - Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, - }, - }, - }, - wantErr: true, - }, - { - name: "Error on unsupported source kind", - obj: &sourcev1.HelmChart{ - Spec: sourcev1.HelmChartSpec{ - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Name: "unsupported", - Kind: "Unsupported", - }, - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := 
NewWithT(t) - - got, err := r.getSource(context.TODO(), tt.obj) - - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - g.Expect(got).To(BeNil()) - return - } - - g.Expect(got).To(Equal(tt.want)) - g.Expect(err).ToNot(HaveOccurred()) - }) - } -} - -func TestHelmChartReconciler_reconcileDelete(t *testing.T) { - g := NewWithT(t) - - r := &HelmChartReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: "reconcile-delete-", - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{ - sourcev1.SourceFinalizer, - }, - }, - Status: sourcev1.HelmChartStatus{}, - } - - artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") - obj.Status.Artifact = &artifact - - got, err := r.reconcileDelete(ctx, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(sreconcile.ResultEmpty)) - g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) - g.Expect(obj.Status.Artifact).To(BeNil()) -} - -func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { - // Helper to build simple helmChartReconcileFunc with result and error. 
- buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcileFunc { - return func(_ context.Context, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { - return r, e - } - } - - tests := []struct { - name string - generation int64 - observedGeneration int64 - reconcileFuncs []helmChartReconcileFunc - wantResult sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "successful reconciliations", - reconcileFuncs: []helmChartReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - }, - { - name: "successful reconciliation with generation difference", - generation: 3, - observedGeneration: 2, - reconcileFuncs: []helmChartReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), - }, - }, - { - name: "failed reconciliation", - reconcileFuncs: []helmChartReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), - }, - wantResult: sreconcile.ResultEmpty, - wantErr: true, - }, - { - name: "multiple object status conditions mutations", - reconcileFuncs: []helmChartReconcileFunc{ - func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") - return sreconcile.ResultSuccess, nil - }, - func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { - conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") - return sreconcile.ResultSuccess, nil - }, - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), - }, - }, - { - name: "subrecs with one result=Requeue, no error", - reconcileFuncs: []helmChartReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - buildReconcileFuncs(sreconcile.ResultRequeue, nil), - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultRequeue, - wantErr: false, - }, - { - name: "subrecs with error before result=Requeue", - reconcileFuncs: []helmChartReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), - buildReconcileFuncs(sreconcile.ResultRequeue, nil), - }, - wantResult: sreconcile.ResultEmpty, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmChartReconciler{} - obj := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - Generation: tt.generation, - }, - Status: sourcev1.HelmChartStatus{ - ObservedGeneration: tt.observedGeneration, - }, - } - - got, err := r.reconcile(context.TODO(), obj, tt.reconcileFuncs) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.wantResult)) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func mockChartBuild(name, version, path string) *chart.Build { - var copyP string - if path != "" { - f, err := os.Open(path) - if err == nil { - defer f.Close() - ff, err := os.CreateTemp("", "chart-mock-*.tgz") - if err == nil { - defer ff.Close() - if _, err = io.Copy(ff, f); err == nil { - copyP = ff.Name() - } - } - } - } - return &chart.Build{ - Name: name, - Version: version, - Path: copyP, - } -} - -func TestHelmChartReconciler_statusConditions(t *testing.T) { - tests := []struct { - name string - beforeFunc 
func(obj *sourcev1.HelmChart) - assertConditions []metav1.Condition - }{ - { - name: "positive conditions only", - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - { - name: "multiple failures", - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") - conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, "ChartPackageError", "some error") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartPackageError", "some error"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), - }, - }, - { - name: "mixed positive and negative conditions", - beforeFunc: func(obj *sourcev1.HelmChart) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, 
"failed to get secret") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.HelmChart{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.HelmChartKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "helmchart", - Namespace: "foo", - }, - } - clientBuilder := fake.NewClientBuilder() - clientBuilder.WithObjects(obj) - c := clientBuilder.Build() - - patchHelper, err := patch.NewHelper(obj, c) - g.Expect(err).ToNot(HaveOccurred()) - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - recResult := sreconcile.ResultSuccess - var retErr error - - summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(helmChartReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner("source-controller"), - } - _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
- - key := client.ObjectKeyFromObject(obj) - g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestHelmChartReconciler_notify(t *testing.T) { - tests := []struct { - name string - res sreconcile.Result - resErr error - oldObjBeforeFunc func(obj *sourcev1.HelmChart) - newObjBeforeFunc func(obj *sourcev1.HelmChart) - wantEvent string - }{ - { - name: "error - no event", - res: sreconcile.ResultEmpty, - resErr: errors.New("some error"), - }, - { - name: "new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - }, - wantEvent: "Normal ChartPackageSucceeded packaged", - }, - { - name: "recovery from failure", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal ChartPackageSucceeded packaged", - }, - { - name: "recovery and new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", 
Checksum: "bbb"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal ChartPackageSucceeded packaged", - }, - { - name: "no updates", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - recorder := record.NewFakeRecorder(32) - - oldObj := &sourcev1.HelmChart{} - newObj := oldObj.DeepCopy() - - if tt.oldObjBeforeFunc != nil { - tt.oldObjBeforeFunc(oldObj) - } - if tt.newObjBeforeFunc != nil { - tt.newObjBeforeFunc(newObj) - } - - reconciler := &HelmChartReconciler{ - EventRecorder: recorder, - } - build := &chart.Build{ - Name: "foo", - Version: "1.0.0", - Path: "some/path", - Packaged: true, - } - reconciler.notify(ctx, oldObj, newObj, build, tt.res, tt.resErr) - - select { - case x, ok := <-recorder.Events: - g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") - if tt.wantEvent != "" { - g.Expect(x).To(ContainSubstring(tt.wantEvent)) - } - default: - if tt.wantEvent != "" { - t.Errorf("expected some event to be emitted") - } - } - }) - } -} - -// extractChartMeta is used to extract a chart metadata from a byte array -func extractChartMeta(chartData []byte) (*hchart.Metadata, error) { - ch, err := loader.LoadArchive(bytes.NewReader(chartData)) - if err != nil { - return nil, err - } - return ch.Metadata, nil -} diff --git a/controllers/helmrepository_controller_oci.go b/controllers/helmrepository_controller_oci.go deleted file mode 100644 index a7d812fa0..000000000 --- 
a/controllers/helmrepository_controller_oci.go +++ /dev/null @@ -1,368 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "errors" - "fmt" - "net/url" - "os" - "time" - - helmgetter "helm.sh/helm/v3/pkg/getter" - helmreg "helm.sh/helm/v3/pkg/registry" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/uuid" - kuberecorder "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" - helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/runtime/predicates" - - "github.com/fluxcd/source-controller/api/v1beta2" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/fluxcd/source-controller/internal/helm/registry" - 
"github.com/fluxcd/source-controller/internal/helm/repository" - "github.com/fluxcd/source-controller/internal/object" - intpredicates "github.com/fluxcd/source-controller/internal/predicates" -) - -var helmRepositoryOCIOwnedConditions = []string{ - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, -} - -var helmRepositoryOCINegativeConditions = []string{ - meta.StalledCondition, - meta.ReconcilingCondition, -} - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// HelmRepositoryOCI Reconciler reconciles a v1beta2.HelmRepository object of type OCI. -type HelmRepositoryOCIReconciler struct { - client.Client - kuberecorder.EventRecorder - helper.Metrics - Getters helmgetter.Providers - ControllerName string - RegistryClientGenerator RegistryClientGeneratorFunc -} - -// RegistryClientGeneratorFunc is a function that returns a registry client -// and an optional file name. -// The file is used to store the registry client credentials. -// The caller is responsible for deleting the file. -type RegistryClientGeneratorFunc func(isLogin bool) (*helmreg.Client, string, error) - -func (r *HelmRepositoryOCIReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) -} - -func (r *HelmRepositoryOCIReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error { - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.HelmRepository{}). 
- WithEventFilter( - predicate.And( - intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeOCI}, - predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), - ), - ). - WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, - }). - Complete(r) -} - -func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { - start := time.Now() - log := ctrl.LoggerFrom(ctx). - // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. - ctx = ctrl.LoggerInto(ctx, log) - - // Fetch the HelmRepository - obj := &sourcev1.HelmRepository{} - if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // Initialize the patch helper with the current version of the object. - patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err - } - - // Always attempt to patch the object after each reconciliation. - defer func() { - // Patch the object, prioritizing the conditions owned by the controller in - // case of any conflicts. - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: helmRepositoryOCIOwnedConditions, - }, - } - patchOpts = append(patchOpts, patch.WithFieldOwner(r.ControllerName)) - // If a reconcile annotation value is found, set it in the object status - // as status.lastHandledReconcileAt. 
- if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - object.SetStatusLastHandledReconcileAt(obj, v) - } - - // Set status observed generation option if the object is stalled, or - // if the object is ready. - if conditions.IsStalled(obj) || conditions.IsReady(obj) { - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - } - - if err = patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // Ignore patch error "not found" when the object is being deleted. - if !obj.GetDeletionTimestamp().IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } - - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) - r.Metrics.RecordDuration(ctx, obj, start) - }() - - // Add finalizer first if it doesn't exist to avoid the race condition - // between init and delete. - if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) - return ctrl.Result{Requeue: true}, nil - } - - // Examine if the object is under deletion. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) - } - - // Examine if a type change has happened and act accordingly - if obj.Spec.Type != sourcev1.HelmRepositoryTypeOCI { - // Remove any stale condition and ignore the object if the type has - // changed. - obj.Status.Conditions = nil - return ctrl.Result{}, nil - } - - result, retErr = r.reconcile(ctx, obj) - return -} - -// reconcile reconciles the HelmRepository object. While reconciling, when an -// error is encountered, it sets the failure details in the appropriate status -// condition type and returns the error with appropriate ctrl.Result. The object -// status conditions and the returned results are evaluated in the deferred -// block at the very end to summarize the conditions to be in a consistent -// state. 
-func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, obj *v1beta2.HelmRepository) (result ctrl.Result, retErr error) { - oldObj := obj.DeepCopy() - - defer func() { - // If it's stalled, ensure reconciling is removed. - if sc := conditions.Get(obj, meta.StalledCondition); sc != nil && sc.Status == metav1.ConditionTrue { - conditions.Delete(obj, meta.ReconcilingCondition) - } - - // Check if it's a successful reconciliation. - if result.RequeueAfter == obj.GetRequeueAfter() && result.Requeue == false && - retErr == nil { - // Remove reconciling condition if the reconciliation was successful. - conditions.Delete(obj, meta.ReconcilingCondition) - // If it's not ready even though it's not reconciling or stalled, - // set the ready failure message as the error. - // Based on isNonStalledSuccess() from internal/reconcile/summarize. - if ready := conditions.Get(obj, meta.ReadyCondition); ready != nil && - ready.Status == metav1.ConditionFalse && !conditions.IsStalled(obj) { - retErr = errors.New(conditions.GetMessage(obj, meta.ReadyCondition)) - } - } - - // If it's still a successful reconciliation and it's not reconciling or - // stalled, mark Ready=True. - if !conditions.IsReconciling(obj) && !conditions.IsStalled(obj) && - retErr == nil && result.RequeueAfter == obj.GetRequeueAfter() { - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready") - } - - // Emit events when object's state changes. - ready := conditions.Get(obj, meta.ReadyCondition) - // Became ready from not ready. - if !conditions.IsReady(oldObj) && conditions.IsReady(obj) { - r.eventLogf(ctx, obj, corev1.EventTypeNormal, ready.Reason, ready.Message) - } - // Became not ready from ready. - if conditions.IsReady(oldObj) && !conditions.IsReady(obj) { - r.eventLogf(ctx, obj, corev1.EventTypeWarning, ready.Reason, ready.Message) - } - }() - - // Set reconciling condition. 
- if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) - } - - // Ensure that it's an OCI URL before continuing. - if !helmreg.IsOCI(obj.Spec.URL) { - u, err := url.Parse(obj.Spec.URL) - if err != nil { - err = fmt.Errorf("failed to parse URL: %w", err) - } else { - err = fmt.Errorf("URL scheme '%s' in '%s' is not supported", u.Scheme, obj.Spec.URL) - } - conditions.MarkStalled(obj, sourcev1.URLInvalidReason, err.Error()) - conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, err.Error()) - ctrl.LoggerFrom(ctx).Error(err, "reconciliation stalled") - result, retErr = ctrl.Result{}, nil - return - } - conditions.Delete(obj, meta.StalledCondition) - - var loginOpts []helmreg.LoginOption - // Configure any authentication related options. - if obj.Spec.SecretRef != nil { - // Attempt to retrieve secret. - name := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.Spec.SecretRef.Name, - } - var secret corev1.Secret - if err := r.Client.Get(ctx, name, &secret); err != nil { - e := fmt.Errorf("failed to get secret '%s': %w", name.String(), err) - conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error()) - result, retErr = ctrl.Result{}, e - return - } - - // Construct login options. - loginOpt, err := registry.LoginOptionFromSecret(obj.Spec.URL, secret) - if err != nil { - e := fmt.Errorf("failed to configure Helm client with secret data: %w", err) - conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error()) - result, retErr = ctrl.Result{}, e - return - } - - if loginOpt != nil { - loginOpts = append(loginOpts, loginOpt) - } - } - - // Create registry client and login if needed. 
- registryClient, file, err := r.RegistryClientGenerator(loginOpts != nil) - if err != nil { - e := fmt.Errorf("failed to create registry client: %w", err) - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, e.Error()) - result, retErr = ctrl.Result{}, e - return - } - if file != "" { - defer func() { - if err := os.Remove(file); err != nil { - r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason, - "failed to delete temporary credentials file: %s", err) - } - }() - } - - chartRepo, err := repository.NewOCIChartRepository(obj.Spec.URL, repository.WithOCIRegistryClient(registryClient)) - if err != nil { - e := fmt.Errorf("failed to parse URL '%s': %w", obj.Spec.URL, err) - conditions.MarkStalled(obj, sourcev1.URLInvalidReason, e.Error()) - conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, e.Error()) - result, retErr = ctrl.Result{}, nil - return - } - conditions.Delete(obj, meta.StalledCondition) - - // Attempt to login to the registry if credentials are provided. - if loginOpts != nil { - err = chartRepo.Login(loginOpts...) - if err != nil { - e := fmt.Errorf("failed to login to registry '%s': %w", obj.Spec.URL, err) - conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error()) - result, retErr = ctrl.Result{}, e - return - } - } - - // Remove any stale Ready condition, most likely False, set above. Its value - // is derived from the overall result of the reconciliation in the deferred - // block at the very end. 
- conditions.Delete(obj, meta.ReadyCondition) - - result, retErr = ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil - return -} - -func (r *HelmRepositoryOCIReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) { - // Remove our finalizer from the list - controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -// eventLogf records events, and logs at the same time. -// -// This log is different from the debug log in the EventRecorder, in the sense -// that this is a simple log. While the debug log contains complete details -// about the event. -func (r *HelmRepositoryOCIReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { - msg := fmt.Sprintf(messageFmt, args...) - // Log and emit event. - if eventType == corev1.EventTypeWarning { - ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) - } else { - ctrl.LoggerFrom(ctx).Info(msg) - } - r.Eventf(obj, eventType, reason, msg) -} diff --git a/controllers/helmrepository_controller_oci_test.go b/controllers/helmrepository_controller_oci_test.go deleted file mode 100644 index 62d49ec29..000000000 --- a/controllers/helmrepository_controller_oci_test.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "encoding/base64" - "fmt" - "testing" - - "github.com/darkowlzz/controller-check/status" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func TestHelmRepositoryOCIReconciler_Reconcile(t *testing.T) { - tests := []struct { - name string - secretType corev1.SecretType - secretData map[string][]byte - }{ - { - name: "valid auth data", - secretData: map[string][]byte{ - "username": []byte(testRegistryUsername), - "password": []byte(testRegistryPassword), - }, - }, - { - name: "no auth data", - secretData: nil, - }, - { - name: "dockerconfigjson Secret", - secretType: corev1.SecretTypeDockerConfigJson, - secretData: map[string][]byte{ - ".dockerconfigjson": []byte(`{"auths":{"` + - testRegistryServer.registryHost + `":{"` + - `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - ns, err := testEnv.CreateNamespace(ctx, "helmrepository-oci-reconcile-test") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - Namespace: ns.Name, - }, - Data: tt.secretData, - } - if tt.secretType != "" { - secret.Type = tt.secretType - } - - g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed()) - - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ 
- GenerateName: "helmrepository-oci-reconcile-", - Namespace: ns.Name, - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - URL: fmt.Sprintf("oci://%s", testRegistryServer.registryHost), - SecretRef: &meta.LocalObjectReference{ - Name: secret.Name, - }, - Type: sourcev1.HelmRepositoryTypeOCI, - }, - } - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. 
- patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmRepository to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }) - } -} diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go deleted file mode 100644 index 2e8df4873..000000000 --- a/controllers/helmrepository_controller_test.go +++ /dev/null @@ -1,1359 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net/http" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/darkowlzz/controller-check/status" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - . 
"github.com/onsi/gomega" - helmgetter "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/fluxcd/source-controller/internal/helm/getter" - "github.com/fluxcd/source-controller/internal/helm/repository" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" -) - -func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { - g := NewWithT(t) - - testServer, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(testServer.Root()) - - g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) - g.Expect(testServer.GenerateIndex()).To(Succeed()) - - testServer.Start() - defer testServer.Stop() - - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-reconcile-", - Namespace: "default", - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - URL: testServer.URL(), - }, - } - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if 
!conditions.IsReady(obj) && obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return readyCondition.Status == metav1.ConditionTrue && - obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. - patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmRepository to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) -} - -func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error - want sreconcile.Result - wantErr bool - assertArtifact *sourcev1.Artifact - assertConditions []metav1.Condition - assertPaths []string - }{ - { - name: "garbage collects", - beforeFunc: func(obj *sourcev1.HelmRepository, storage 
*Storage) error { - revisions := []string{"a", "b", "c", "d"} - for n := range revisions { - v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), - Revision: v, - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { - return err - } - if n != len(revisions)-1 { - time.Sleep(time.Second * 1) - } - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/d.txt", - Revision: "d", - Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", - URL: testStorage.Hostname + "/reconcile-storage/d.txt", - Size: int64p(int64(len("d"))), - }, - assertPaths: []string{ - "/reconcile-storage/d.txt", - "/reconcile-storage/c.txt", - "!/reconcile-storage/b.txt", - "!/reconcile-storage/a.txt", - }, - want: sreconcile.ResultSuccess, - }, - { - name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/invalid.txt", - Revision: "d", - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "!/reconcile-storage/invalid.txt", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), - }, - }, - { - name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: "http://outdated.com/reconcile-storage/hostname.txt", - } - if err := 
testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { - return err - } - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "/reconcile-storage/hostname.txt", - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", - Size: int64p(int64(len("file"))), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - }, - } - if tt.beforeFunc != nil { - g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) - } - - var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact - - got, err := r.reconcileStorage(context.TODO(), obj, &artifact, &chartRepo) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) - if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { - g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) - } - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - for _, p := range tt.assertPaths { - absoluteP := filepath.Join(testStorage.BasePath, p) - if !strings.HasPrefix(p, "!") { - g.Expect(absoluteP).To(BeAnExistingFile()) - continue - } - g.Expect(absoluteP).NotTo(BeAnExistingFile()) - } - }) - } -} - -func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { - type options struct { - username string - password string - publicKey []byte - privateKey []byte - ca []byte - } - - tests := []struct { - name 
string - protocol string - server options - secret *corev1.Secret - beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, checksum string) - afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "HTTP without secretRef makes ArtifactOutdated=True", - protocol: "http", - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - t.Expect(chartRepo.Checksum).ToNot(BeEmpty()) - t.Expect(chartRepo.CachePath).ToNot(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).ToNot(BeEmpty()) - }, - }, - { - name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", - protocol: "http", - server: options{ - username: "git", - password: "1234", - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", - }, - Data: map[string][]byte{ - "username": []byte("git"), - "password": []byte("1234"), - }, - }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - t.Expect(chartRepo.Checksum).ToNot(BeEmpty()) - 
t.Expect(chartRepo.CachePath).ToNot(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).ToNot(BeEmpty()) - }, - }, - { - name: "HTTPS with CAFile secret makes ArtifactOutdated=True", - protocol: "https", - server: options{ - publicKey: tlsPublicKey, - privateKey: tlsPrivateKey, - ca: tlsCA, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", - }, - Data: map[string][]byte{ - "caFile": tlsCA, - }, - }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - t.Expect(chartRepo.Checksum).ToNot(BeEmpty()) - t.Expect(chartRepo.CachePath).ToNot(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).ToNot(BeEmpty()) - }, - }, - { - name: "HTTPS with invalid CAFile secret makes FetchFailed=True and returns error", - protocol: "https", - server: options{ - publicKey: tlsPublicKey, - privateKey: tlsPrivateKey, - ca: tlsCA, - }, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-ca", - }, - Data: map[string][]byte{ - "caFile": []byte("invalid"), - }, - }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to create TLS client config with secret data: cannot append certificate into certificate pool: invalid 
caFile"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // No repo index due to fetch fail. - t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).To(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(BeEmpty()) - }, - }, - { - name: "Invalid URL makes FetchFailed=True and returns stalling error", - protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // No repo index due to fetch fail. - t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).To(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(BeEmpty()) - }, - }, - { - name: "Unsupported scheme makes FetchFailed=True and returns stalling error", - protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // No repo index due to fetch fail. 
- t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).To(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(BeEmpty()) - }, - }, - { - name: "Missing secret returns FetchFailed=True and returns error", - protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // No repo index due to fetch fail. - t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).To(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(BeEmpty()) - }, - }, - { - name: "Malformed secret returns FetchFailed=True and returns error", - protocol: "http", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "malformed-basic-auth", - }, - Data: map[string][]byte{ - "username": []byte("git"), - }, - }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} - }, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"), - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // No repo index due to fetch fail. 
- t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).To(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(BeEmpty()) - }, - }, - { - name: "cached index with same checksum", - protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: checksum, - Checksum: checksum, - } - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - // chartRepo.Checksum isn't populated, artifact.Checksum is - // populated from the cached repo index data. - t.Expect(chartRepo.Checksum).To(BeEmpty()) - t.Expect(chartRepo.CachePath).ToNot(BeEmpty()) - t.Expect(artifact.Checksum).To(Equal(obj.Status.Artifact.Checksum)) - t.Expect(artifact.Revision).To(Equal(obj.Status.Artifact.Revision)) - }, - want: sreconcile.ResultSuccess, - }, - { - name: "cached index with different checksum", - protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: checksum, - Checksum: "foo", - } - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) { - t.Expect(chartRepo.Checksum).ToNot(BeEmpty()) - t.Expect(chartRepo.CachePath).ToNot(BeEmpty()) - t.Expect(artifact.Checksum).To(BeEmpty()) - t.Expect(artifact.Revision).To(Equal(obj.Status.Artifact.Revision)) - }, - want: sreconcile.ResultSuccess, - }, - } - - for _, tt := range tests { - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "auth-strategy-", - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - server, err := helmtestserver.NewTempHelmServer() - 
g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(server.Root()) - - g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed()) - g.Expect(server.GenerateIndex()).To(Succeed()) - - if len(tt.server.username+tt.server.password) > 0 { - server.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || u != tt.server.username || p != tt.server.password { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - } - - secret := tt.secret.DeepCopy() - switch tt.protocol { - case "http": - server.Start() - defer server.Stop() - obj.Spec.URL = server.URL() - case "https": - g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) - defer server.Stop() - obj.Spec.URL = server.URL() - default: - t.Fatalf("unsupported protocol %q", tt.protocol) - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - if secret != nil { - builder.WithObjects(secret.DeepCopy()) - } - - // Calculate the artifact checksum for valid repos configurations. - clientOpts := []helmgetter.Option{ - helmgetter.WithURL(server.URL()), - } - var newChartRepo *repository.ChartRepository - var tOpts *tls.Config - validSecret := true - if secret != nil { - // Extract the client options from secret, ignoring any invalid - // value. validSecret is used to determine if the indexChecksum - // should be calculated below. - var cOpts []helmgetter.Option - var serr error - cOpts, serr = getter.ClientOptionsFromSecret(*secret) - if serr != nil { - validSecret = false - } - clientOpts = append(clientOpts, cOpts...) 
- tOpts, serr = getter.TLSClientConfigFromSecret(*secret, server.URL()) - if serr != nil { - validSecret = false - } - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, tOpts, clientOpts) - } else { - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil, nil) - } - g.Expect(err).ToNot(HaveOccurred()) - - // NOTE: checksum will be empty in beforeFunc for invalid repo - // configurations as the client can't get the repo. - var indexChecksum string - if validSecret { - indexChecksum, err = newChartRepo.CacheIndex() - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.beforeFunc != nil { - tt.beforeFunc(g, obj, indexChecksum) - } - - r := &HelmRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Client: builder.Build(), - Storage: testStorage, - Getters: testGetters, - } - - var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact - got, err := r.reconcileSource(context.TODO(), obj, &artifact, &chartRepo) - defer os.Remove(chartRepo.CachePath) - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - if tt.afterFunc != nil { - tt.afterFunc(g, obj, artifact, chartRepo) - } - }) - } -} - -func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { - tests := []struct { - name string - beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) - afterFunc func(t *WithT, obj *sourcev1.HelmRepository) - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "Archiving artifact to storage makes ArtifactInStorage=True", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - want: sreconcile.ResultSuccess, - assertConditions: 
[]metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), - }, - }, - { - name: "Up-to-date artifact should not update status", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - obj.Status.Artifact = artifact.DeepCopy() - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - t.Expect(obj.Status.URL).To(BeEmpty()) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), - }, - }, - { - name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), - }, - }, - { - name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { - localPath := testStorage.LocalPath(*obj.GetArtifact()) - symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") - targetFile, err := os.Readlink(symlinkPath) - t.Expect(err).NotTo(HaveOccurred()) - t.Expect(localPath).To(Equal(targetFile)) - }, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.HelmRepository{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.HelmRepositoryKind, - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-bucket-", - Generation: 1, - Namespace: "default", - }, - Spec: sourcev1.HelmRepositorySpec{ - Timeout: &metav1.Duration{Duration: timeout}, - URL: "https://example.com/index.yaml", - }, - } - - tmpDir := t.TempDir() - - // Create an empty cache file. - cachePath := filepath.Join(tmpDir, "index.yaml") - cacheFile, err := os.Create(cachePath) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cacheFile.Close()).ToNot(HaveOccurred()) - - chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil, nil) - g.Expect(err).ToNot(HaveOccurred()) - chartRepo.CachePath = cachePath - - artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") - // Checksum of the index file calculated by the ChartRepository. - artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - - if tt.beforeFunc != nil { - tt.beforeFunc(g, obj, artifact, chartRepo) - } - - got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, chartRepo) - g.Expect(err != nil).To(Equal(tt.wantErr)) - g.Expect(got).To(Equal(tt.want)) - - // On error, artifact is empty. Check artifacts only on successful - // reconcile. 
- if !tt.wantErr { - g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) - } - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - if tt.afterFunc != nil { - tt.afterFunc(g, obj) - } - }) - } -} - -func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { - // Helper to build simple helmRepositoryReconcileFunc with result and error. - buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc { - return func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { - return r, e - } - } - - tests := []struct { - name string - generation int64 - observedGeneration int64 - reconcileFuncs []helmRepositoryReconcileFunc - wantResult sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "successful reconciliations", - reconcileFuncs: []helmRepositoryReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - }, - { - name: "successful reconciliation with generation difference", - generation: 3, - observedGeneration: 2, - reconcileFuncs: []helmRepositoryReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), - }, - }, - { - name: "failed reconciliation", - reconcileFuncs: []helmRepositoryReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), - }, - wantResult: sreconcile.ResultEmpty, - wantErr: true, - }, - { - name: "multiple object status conditions mutations", - reconcileFuncs: []helmRepositoryReconcileFunc{ - func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) 
(sreconcile.Result, error) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") - return sreconcile.ResultSuccess, nil - }, - func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { - conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") - return sreconcile.ResultSuccess, nil - }, - }, - wantResult: sreconcile.ResultSuccess, - wantErr: false, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), - }, - }, - { - name: "subrecs with one result=Requeue, no error", - reconcileFuncs: []helmRepositoryReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - buildReconcileFuncs(sreconcile.ResultRequeue, nil), - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - }, - wantResult: sreconcile.ResultRequeue, - wantErr: false, - }, - { - name: "subrecs with error before result=Requeue", - reconcileFuncs: []helmRepositoryReconcileFunc{ - buildReconcileFuncs(sreconcile.ResultSuccess, nil), - buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), - buildReconcileFuncs(sreconcile.ResultRequeue, nil), - }, - wantResult: sreconcile.ResultEmpty, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - r := &HelmRepositoryReconciler{} - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - Generation: tt.generation, - }, - Status: sourcev1.HelmRepositoryStatus{ - ObservedGeneration: tt.observedGeneration, - }, - } - - ctx := context.TODO() - - gotRes, gotErr := r.reconcile(ctx, obj, tt.reconcileFuncs) - g.Expect(gotErr != nil).To(Equal(tt.wantErr)) - g.Expect(gotRes).To(Equal(tt.wantResult)) - - 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { - tests := []struct { - name string - beforeFunc func(obj *sourcev1.HelmRepository) - assertConditions []metav1.Condition - }{ - { - name: "positive conditions only", - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - { - name: "multiple failures", - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), - }, - }, - { - name: "mixed positive and negative conditions", - beforeFunc: func(obj *sourcev1.HelmRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") - conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") - }, - assertConditions: []metav1.Condition{ - *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - obj := &sourcev1.HelmRepository{ - TypeMeta: metav1.TypeMeta{ - Kind: sourcev1.HelmRepositoryKind, - APIVersion: "source.toolkit.fluxcd.io/v1beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "helmrepo", - Namespace: "foo", - }, - } - clientBuilder := fake.NewClientBuilder() - clientBuilder.WithObjects(obj) - c := clientBuilder.Build() - - patchHelper, err := patch.NewHelper(obj, c) - g.Expect(err).ToNot(HaveOccurred()) - - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - ctx := context.TODO() - recResult := sreconcile.ResultSuccess - var retErr error - - summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(helmRepositoryReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner("source-controller"), - } - _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
- - key := client.ObjectKeyFromObject(obj) - g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) - g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestHelmRepositoryReconciler_notify(t *testing.T) { - var aSize int64 = 30000 - tests := []struct { - name string - res sreconcile.Result - resErr error - oldObjBeforeFunc func(obj *sourcev1.HelmRepository) - newObjBeforeFunc func(obj *sourcev1.HelmRepository) - wantEvent string - }{ - { - name: "error - no event", - res: sreconcile.ResultEmpty, - resErr: errors.New("some error"), - }, - { - name: "new artifact with nil size", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: nil} - }, - wantEvent: "Normal NewArtifact stored fetched index of unknown size", - }, - { - name: "new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - }, - wantEvent: "Normal NewArtifact stored fetched index of size", - }, - { - name: "recovery from failure", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal Succeeded stored fetched index of size", - }, - { - name: "recovery and new artifact", - res: sreconcile.ResultSuccess, - 
resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb", Size: &aSize} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal NewArtifact stored fetched index of size", - }, - { - name: "no updates", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - recorder := record.NewFakeRecorder(32) - - oldObj := &sourcev1.HelmRepository{} - newObj := oldObj.DeepCopy() - - if tt.oldObjBeforeFunc != nil { - tt.oldObjBeforeFunc(oldObj) - } - if tt.newObjBeforeFunc != nil { - tt.newObjBeforeFunc(newObj) - } - - reconciler := &HelmRepositoryReconciler{ - EventRecorder: recorder, - } - chartRepo := repository.ChartRepository{ - URL: "some-address", - } - reconciler.notify(ctx, oldObj, newObj, chartRepo, tt.res, tt.resErr) - - select { - case x, ok := <-recorder.Events: - g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") - if tt.wantEvent != "" { - g.Expect(x).To(ContainSubstring(tt.wantEvent)) - } - default: - if tt.wantEvent != "" { - 
t.Errorf("expected some event to be emitted") - } - } - }) - } -} - -func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.T) { - g := NewWithT(t) - - testServer, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(testServer.Root()) - - g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) - g.Expect(testServer.GenerateIndex()).To(Succeed()) - - testServer.Start() - defer testServer.Stop() - - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-reconcile-", - Namespace: "default", - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - URL: testServer.URL(), - }, - } - g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) && obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return readyCondition.Status == metav1.ConditionTrue && - obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. - condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. 
- u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Switch to a OCI helm repository type - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-reconcile-", - Namespace: "default", - }, - Data: map[string][]byte{ - "username": []byte(testRegistryUsername), - "password": []byte(testRegistryPassword), - }, - } - g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed()) - - obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI - obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost) - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: secret.Name, - } - - oldGen := obj.GetGeneration() - g.Expect(testEnv.Update(ctx, obj)).To(Succeed()) - newGen := oldGen + 1 - - // Wait for HelmRepository to be Ready with new generation. - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) && obj.Status.Artifact != nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - if readyCondition == nil { - return false - } - return readyCondition.Status == metav1.ConditionTrue && - newGen == readyCondition.ObservedGeneration && - newGen == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. 
- condns = &status.Conditions{NegativePolarity: helmRepositoryOCINegativeConditions} - checker = status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmRepository to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) -} - -func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing.T) { - g := NewWithT(t) - - testServer, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(testServer.Root()) - - g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) - g.Expect(testServer.GenerateIndex()).To(Succeed()) - - testServer.Start() - defer testServer.Stop() - - obj := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-reconcile-", - Namespace: "default", - }, - Spec: sourcev1.HelmRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - URL: testServer.URL(), - }, - } - g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) && obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return readyCondition.Status == metav1.ConditionTrue && - obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. 
- condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check. - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Change spec Interval to validate spec update - obj.Spec.Interval = metav1.Duration{Duration: interval + time.Second} - oldGen := obj.GetGeneration() - g.Expect(testEnv.Update(ctx, obj)).To(Succeed()) - newGen := oldGen + 1 - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) && obj.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return readyCondition.Status == metav1.ConditionTrue && - newGen == readyCondition.ObservedGeneration && - newGen == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the object status is valid. 
- condns = &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} - checker = status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - - // Wait for HelmRepository to be deleted - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) -} - -func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) { - g := NewWithT(t) - testCache.Clear() - - testServer, err := helmtestserver.NewTempHelmServer() - g.Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(testServer.Root()) - - g.Expect(testServer.PackageChartWithVersion("testdata/charts/helmchart", "0.1.0")).To(Succeed()) - g.Expect(testServer.GenerateIndex()).To(Succeed()) - - testServer.Start() - defer testServer.Stop() - - ns, err := testEnv.CreateNamespace(ctx, "helmrepository") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - helmRepo := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "helmrepository-", - Namespace: ns.Name, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: testServer.URL(), - }, - } - g.Expect(testEnv.CreateAndWait(ctx, helmRepo)).To(Succeed()) - - key := client.ObjectKey{Name: helmRepo.Name, Namespace: helmRepo.Namespace} - // Wait for finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, helmRepo); err != nil { - return false - } - return len(helmRepo.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for HelmRepository to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, helmRepo); err != nil { - return false - } - if !conditions.IsReady(helmRepo) || helmRepo.Status.Artifact == nil { - return false - } - readyCondition := conditions.Get(helmRepo, meta.ReadyCondition) - return helmRepo.Generation == readyCondition.ObservedGeneration && - 
helmRepo.Generation == helmRepo.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - err = testEnv.Get(ctx, key, helmRepo) - g.Expect(err).ToNot(HaveOccurred()) - localPath := testStorage.LocalPath(*helmRepo.GetArtifact()) - _, cacheHit := testCache.Get(localPath) - g.Expect(cacheHit).To(BeTrue()) -} diff --git a/controllers/ocirepository_controller.go b/controllers/ocirepository_controller.go deleted file mode 100644 index 2a4993bbb..000000000 --- a/controllers/ocirepository_controller.go +++ /dev/null @@ -1,910 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "net/http" - "os" - "sort" - "strings" - "time" - - "github.com/Masterminds/semver/v3" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/authn/k8schain" - "github.com/google/go-containerregistry/pkg/crane" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/uuid" - kuberecorder "k8s.io/client-go/tools/record" - - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/oci" - "github.com/fluxcd/pkg/oci/auth/login" - "github.com/fluxcd/pkg/runtime/conditions" - helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/pkg/untar" - "github.com/fluxcd/pkg/version" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/internal/reconcile/summarize" - 
"github.com/fluxcd/source-controller/internal/util" -) - -// ociRepositoryReadyCondition contains the information required to summarize a -// v1beta2.OCIRepository Ready Condition. -var ociRepositoryReadyCondition = summarize.Conditions{ - Target: meta.ReadyCondition, - Owned: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactInStorageCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - Summarize: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - sourcev1.ArtifactInStorageCondition, - meta.StalledCondition, - meta.ReconcilingCondition, - }, - NegativePolarity: []string{ - sourcev1.StorageOperationFailedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactOutdatedCondition, - meta.StalledCondition, - meta.ReconcilingCondition, - }, -} - -// ociRepositoryFailConditions contains the conditions that represent a failure. -var ociRepositoryFailConditions = []string{ - sourcev1.FetchFailedCondition, - sourcev1.StorageOperationFailedCondition, -} - -type invalidOCIURLError struct { - err error -} - -func (e invalidOCIURLError) Error() string { - return e.err.Error() -} - -// ociRepositoryReconcileFunc is the function type for all the v1beta2.OCIRepository -// (sub)reconcile functions. The type implementations are grouped and -// executed serially to perform the complete reconcile of the object. 
-type ociRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) - -// OCIRepositoryReconciler reconciles a v1beta2.OCIRepository object -type OCIRepositoryReconciler struct { - client.Client - helper.Metrics - kuberecorder.EventRecorder - - Storage *Storage - ControllerName string - requeueDependency time.Duration -} - -type OCIRepositoryReconcilerOptions struct { - MaxConcurrentReconciles int - DependencyRequeueInterval time.Duration - RateLimiter ratelimiter.RateLimiter -} - -// SetupWithManager sets up the controller with the Manager. -func (r *OCIRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, OCIRepositoryReconcilerOptions{}) -} - -func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts OCIRepositoryReconcilerOptions) error { - r.requeueDependency = opts.DependencyRequeueInterval - - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.OCIRepository{}, builder.WithPredicates( - predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, - }). - Complete(r) -} - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { - start := time.Now() - log := ctrl.LoggerFrom(ctx). 
- // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. - ctx = ctrl.LoggerInto(ctx, log) - - // Fetch the OCIRepository - obj := &sourcev1.OCIRepository{} - if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // Initialize the patch helper with the current version of the object. - patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err - } - - // recResult stores the abstracted reconcile result. - var recResult sreconcile.Result - - // Always attempt to patch the object and status after each reconciliation - // NOTE: The final runtime result and error are set in this block. - defer func() { - summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) - summarizeOpts := []summarize.Option{ - summarize.WithConditions(ociRepositoryReadyCondition), - summarize.WithReconcileResult(recResult), - summarize.WithReconcileError(retErr), - summarize.WithIgnoreNotFound(), - summarize.WithProcessors( - summarize.ErrorActionHandler, - summarize.RecordReconcileReq, - ), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), - summarize.WithPatchFieldOwner(r.ControllerName), - } - result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
- - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) - r.Metrics.RecordDuration(ctx, obj, start) - }() - - // Add finalizer first if not exist to avoid the race condition between init and delete - if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) - recResult = sreconcile.ResultRequeue - return - } - - // Examine if the object is under deletion - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - recResult, retErr = r.reconcileDelete(ctx, obj) - return - } - - // Reconcile actual object - reconcilers := []ociRepositoryReconcileFunc{ - r.reconcileStorage, - r.reconcileSource, - r.reconcileArtifact, - } - recResult, retErr = r.reconcile(ctx, obj, reconcilers) - return -} - -// reconcile iterates through the ociRepositoryReconcileFunc tasks for the -// object. It returns early on the first call that returns -// reconcile.ResultRequeue, or produces an error. -func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { - oldObj := obj.DeepCopy() - - // Mark as reconciling if generation differs. 
- if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) - } - - // Create temp working dir - tmpDir, err := util.TempDirForObj("", obj) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to create temporary working directory: %w", err), - sourcev1.DirCreationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - defer func() { - if err = os.RemoveAll(tmpDir); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory") - } - }() - conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) - - var ( - res sreconcile.Result - resErr error - metadata = sourcev1.Artifact{} - ) - - // Run the sub-reconcilers and build the result of reconciliation. - for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &metadata, tmpDir) - // Exit immediately on ResultRequeue. - if recResult == sreconcile.ResultRequeue { - return sreconcile.ResultRequeue, nil - } - // If an error is received, prioritize the returned results because an - // error also means immediate requeue. - if err != nil { - resErr = err - res = recResult - break - } - // Prioritize requeue request in the result. - res = sreconcile.LowestRequeuingResult(res, recResult) - } - - r.notify(ctx, oldObj, obj, res, resErr) - - return res, resErr -} - -// reconcileSource fetches the upstream OCI artifact metadata and content. -// If this fails, it records v1beta2.FetchFailedCondition=True on the object and returns early. 
-func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { - ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) - defer cancel() - - options := r.craneOptions(ctxTimeout) - - // Generate the registry credential keychain either from static credentials or using cloud OIDC - keychain, err := r.keychain(ctx, obj) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to get credential: %w", err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - options = append(options, crane.WithAuthFromKeychain(keychain)) - - if obj.Spec.Provider != sourcev1.GenericOCIProvider { - auth, authErr := r.oidcAuth(ctxTimeout, obj) - if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) { - e := serror.NewGeneric( - fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - if auth != nil { - options = append(options, crane.WithAuth(auth)) - } - } - - // Generate the transport for remote operations - transport, err := r.transport(ctx, obj) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to generate transport for '%s': %w", obj.Spec.URL, err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - if transport != nil { - options = append(options, crane.WithTransport(transport)) - } - - // Determine which artifact revision to pull - url, err := r.getArtifactURL(obj, options) - if err != nil { - if _, ok := err.(invalidOCIURLError); ok { - e := serror.NewStalling( - fmt.Errorf("URL validation failed for '%s': 
%w", obj.Spec.URL, err), - sourcev1.URLInvalidReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - e := serror.NewGeneric( - fmt.Errorf("failed to determine the artifact tag for '%s': %w", obj.Spec.URL, err), - sourcev1.ReadOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Pull artifact from the remote container registry - img, err := crane.Pull(url, options...) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err), - sourcev1.OCIPullFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Determine the artifact SHA256 digest - imgDigest, err := img.Digest() - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to determine artifact digest: %w", err), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Set the internal revision to the remote digest hex - revision := imgDigest.Hex - - // Copy the OCI annotations to the internal artifact metadata - manifest, err := img.Manifest() - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to parse artifact manifest: %w", err), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - m := &sourcev1.Artifact{ - Revision: revision, - Metadata: manifest.Annotations, - } - m.DeepCopyInto(metadata) - - // Mark observations about the revision on the object - defer func() { - if !obj.GetArtifact().HasRevision(revision) { - message := fmt.Sprintf("new digest '%s' for '%s'", revision, url) - conditions.MarkTrue(obj, 
sourcev1.ArtifactOutdatedCondition, "NewRevision", message) - conditions.MarkReconciling(obj, "NewRevision", message) - } - }() - - // Extract the content of the first artifact layer - if !obj.GetArtifact().HasRevision(revision) { - layers, err := img.Layers() - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to parse artifact layers: %w", err), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - if len(layers) < 1 { - e := serror.NewGeneric( - fmt.Errorf("no layers found in artifact"), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - blob, err := layers[0].Compressed() - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to extract the first layer from artifact: %w", err), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - if _, err = untar.Untar(blob, dir); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to untar the first layer from artifact: %w", err), - sourcev1.OCILayerOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - } - - conditions.Delete(obj, sourcev1.FetchFailedCondition) - return sreconcile.ResultSuccess, nil -} - -// parseRepositoryURL validates and extracts the repository URL. 
-func (r *OCIRepositoryReconciler) parseRepositoryURL(obj *sourcev1.OCIRepository) (string, error) { - if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) { - return "", fmt.Errorf("URL must be in format 'oci:////'") - } - - url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) - ref, err := name.ParseReference(url) - if err != nil { - return "", err - } - - imageName := strings.TrimPrefix(url, ref.Context().RegistryStr()) - if s := strings.Split(imageName, ":"); len(s) > 1 { - return "", fmt.Errorf("URL must not contain a tag; remove ':%s'", s[1]) - } - - return ref.Context().Name(), nil -} - -// getArtifactURL determines which tag or digest should be used and returns the OCI artifact FQN. -func (r *OCIRepositoryReconciler) getArtifactURL(obj *sourcev1.OCIRepository, options []crane.Option) (string, error) { - url, err := r.parseRepositoryURL(obj) - if err != nil { - return "", invalidOCIURLError{err} - } - - if obj.Spec.Reference != nil { - if obj.Spec.Reference.Digest != "" { - return fmt.Sprintf("%s@%s", url, obj.Spec.Reference.Digest), nil - } - - if obj.Spec.Reference.SemVer != "" { - tag, err := r.getTagBySemver(url, obj.Spec.Reference.SemVer, options) - if err != nil { - return "", err - } - return fmt.Sprintf("%s:%s", url, tag), nil - } - - if obj.Spec.Reference.Tag != "" { - return fmt.Sprintf("%s:%s", url, obj.Spec.Reference.Tag), nil - } - } - - return url, nil -} - -// getTagBySemver call the remote container registry, fetches all the tags from the repository, -// and returns the latest tag according to the semver expression. -func (r *OCIRepositoryReconciler) getTagBySemver(url, exp string, options []crane.Option) (string, error) { - tags, err := crane.ListTags(url, options...) 
- if err != nil { - return "", err - } - - constraint, err := semver.NewConstraint(exp) - if err != nil { - return "", fmt.Errorf("semver '%s' parse error: %w", exp, err) - } - - var matchingVersions []*semver.Version - for _, t := range tags { - v, err := version.ParseVersion(t) - if err != nil { - continue - } - - if constraint.Check(v) { - matchingVersions = append(matchingVersions, v) - } - } - - if len(matchingVersions) == 0 { - return "", fmt.Errorf("no match found for semver: %s", exp) - } - - sort.Sort(sort.Reverse(semver.Collection(matchingVersions))) - return matchingVersions[0].Original(), nil -} - -// keychain generates the credential keychain based on the resource -// configuration. If no auth is specified a default keychain with -// anonymous access is returned -func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) { - pullSecretNames := sets.NewString() - - // lookup auth secret - if obj.Spec.SecretRef != nil { - pullSecretNames.Insert(obj.Spec.SecretRef.Name) - } - - // lookup service account - if obj.Spec.ServiceAccountName != "" { - serviceAccountName := obj.Spec.ServiceAccountName - serviceAccount := corev1.ServiceAccount{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: serviceAccountName}, &serviceAccount) - if err != nil { - return nil, err - } - for _, ips := range serviceAccount.ImagePullSecrets { - pullSecretNames.Insert(ips.Name) - } - } - - // if no pullsecrets available return DefaultKeyChain - if len(pullSecretNames) == 0 { - return authn.DefaultKeychain, nil - } - - // lookup image pull secrets - imagePullSecrets := make([]corev1.Secret, len(pullSecretNames)) - for i, imagePullSecretName := range pullSecretNames.List() { - imagePullSecret := corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: imagePullSecretName}, &imagePullSecret) - if err != nil { - r.eventLogf(ctx, obj, events.EventSeverityTrace, 
sourcev1.AuthenticationFailedReason, - "auth secret '%s' not found", imagePullSecretName) - return nil, err - } - imagePullSecrets[i] = imagePullSecret - } - - return k8schain.NewFromPullSecrets(ctx, imagePullSecrets) -} - -// transport clones the default transport from remote and when a certSecretRef is specified, -// the returned transport will include the TLS client and/or CA certificates. -func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository) (http.RoundTripper, error) { - if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" { - return nil, nil - } - - certSecretName := types.NamespacedName{ - Namespace: obj.Namespace, - Name: obj.Spec.CertSecretRef.Name, - } - var certSecret corev1.Secret - if err := r.Get(ctx, certSecretName, &certSecret); err != nil { - return nil, err - } - - transport := remote.DefaultTransport.Clone() - tlsConfig := transport.TLSClientConfig - - if clientCert, ok := certSecret.Data[oci.ClientCert]; ok { - // parse and set client cert and secret - if clientKey, ok := certSecret.Data[oci.ClientKey]; ok { - cert, err := tls.X509KeyPair(clientCert, clientKey) - if err != nil { - return nil, err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } else { - return nil, fmt.Errorf("'%s' found in secret, but no %s", oci.ClientCert, oci.ClientKey) - } - } - - if caCert, ok := certSecret.Data[oci.CACert]; ok { - syscerts, err := x509.SystemCertPool() - if err != nil { - return nil, err - } - syscerts.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = syscerts - } - return transport, nil - -} - -// oidcAuth generates the OIDC credential authenticator based on the specified cloud provider. 
-func (r *OCIRepositoryReconciler) oidcAuth(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Authenticator, error) { - url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) - ref, err := name.ParseReference(url) - if err != nil { - return nil, fmt.Errorf("failed to parse URL '%s': %w", obj.Spec.URL, err) - } - - opts := login.ProviderOptions{} - switch obj.Spec.Provider { - case sourcev1.AmazonOCIProvider: - opts.AwsAutoLogin = true - case sourcev1.AzureOCIProvider: - opts.AzureAutoLogin = true - case sourcev1.GoogleOCIProvider: - opts.GcpAutoLogin = true - } - - return login.NewManager().Login(ctx, url, ref, opts) -} - -// craneOptions sets the auth headers, timeout and user agent -// for all operations against remote container registries. -func (r *OCIRepositoryReconciler) craneOptions(ctx context.Context) []crane.Option { - options := []crane.Option{ - crane.WithContext(ctx), - crane.WithUserAgent(oci.UserAgent), - } - return options -} - -// reconcileStorage ensures the current state of the storage matches the -// desired and previously observed state. -// -// The garbage collection is executed based on the flag configured settings and -// may remove files that are beyond their TTL or the maximum number of files -// to survive a collection cycle. -// If the Artifact in the Status of the object disappeared from the Storage, -// it is removed from the object. -// If the object does not have an Artifact in its Status, a Reconciling -// condition is added. -// The hostname of any URL in the Status of the object are updated, to ensure -// they match the Storage server hostname of current runtime. 
-func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, - obj *sourcev1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) { - // Garbage collect previous advertised artifact(s) from storage - _ = r.garbageCollect(ctx, obj) - - // Determine if the advertised artifact is still in storage - if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { - obj.Status.Artifact = nil - obj.Status.URL = "" - // Remove the condition as the artifact doesn't exist. - conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) - } - - // Record that we do not have an artifact - if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") - conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) - return sreconcile.ResultSuccess, nil - } - - // Always update URLs to ensure hostname is up-to-date - r.Storage.SetArtifactURL(obj.GetArtifact()) - obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - - return sreconcile.ResultSuccess, nil -} - -// reconcileArtifact archives a new Artifact to the Storage, if the current -// (Status) data on the object does not match the given. -// -// The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. -// If the given Artifact does not differ from the object's current, it returns -// early. -// On a successful archive, the Artifact in the Status of the object is set, -// and the symlink in the Storage is updated to its path. 
-func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, - obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { - // Calculate revision - revision := metadata.Revision - - // Create artifact - artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - - // Set the ArtifactInStorageCondition if there's no drift. - defer func() { - if obj.GetArtifact().HasRevision(artifact.Revision) { - conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, - "stored artifact for digest '%s'", artifact.Revision) - } - }() - - // The artifact is up-to-date - if obj.GetArtifact().HasRevision(artifact.Revision) { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, - "artifact up-to-date with remote digest: '%s'", artifact.Revision) - return sreconcile.ResultSuccess, nil - } - - // Ensure target path exists and is a directory - if f, err := os.Stat(dir); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to stat source path: %w", err), - sourcev1.StatOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } else if !f.IsDir() { - e := serror.NewGeneric( - fmt.Errorf("source path '%s' is not a directory", dir), - sourcev1.InvalidPathReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to create artifact directory: %w", err), - sourcev1.DirCreationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - unlock, err := 
r.Storage.Lock(artifact) - if err != nil { - return sreconcile.ResultEmpty, serror.NewGeneric( - fmt.Errorf("failed to acquire lock for artifact: %w", err), - meta.FailedReason, - ) - } - defer unlock() - - // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, nil); err != nil { - e := serror.NewGeneric( - fmt.Errorf("unable to archive artifact to storage: %s", err), - sourcev1.ArchiveOperationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } - - // Record it on the object - obj.Status.Artifact = artifact.DeepCopy() - obj.Status.Artifact.Metadata = metadata.Metadata - - // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, - "failed to update status URL symlink: %s", err) - } - if url != "" { - obj.Status.URL = url - } - conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) - return sreconcile.ResultSuccess, nil -} - -// reconcileDelete handles the deletion of the object. -// It first garbage collects all Artifacts for the object from the Storage. -// Removing the finalizer from the object if successful. -func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) { - // Garbage collect the resource's artifacts - if err := r.garbageCollect(ctx, obj); err != nil { - // Return the error so we retry the failed garbage collection - return sreconcile.ResultEmpty, err - } - - // Remove our finalizer from the list - controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) - - // Stop reconciliation as the object is being deleted - return sreconcile.ResultEmpty, nil -} - -// garbageCollect performs a garbage collection for the given object. 
-// -// It removes all but the current Artifact from the Storage, unless the -// deletion timestamp on the object is set. Which will result in the -// removal of all Artifacts for the objects. -func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error { - if !obj.DeletionTimestamp.IsZero() { - if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - return serror.NewGeneric( - fmt.Errorf("garbage collection for deleted resource failed: %w", err), - "GarbageCollectionFailed", - ) - } else if deleted != "" { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - "garbage collected artifacts for deleted resource") - } - obj.Status.Artifact = nil - return nil - } - if obj.GetArtifact() != nil { - delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) - if err != nil { - return serror.NewGeneric( - fmt.Errorf("garbage collection of artifacts failed: %w", err), - "GarbageCollectionFailed", - ) - } - if len(delFiles) > 0 { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) - return nil - } - } - return nil -} - -// eventLogf records events, and logs at the same time. -// -// This log is different from the debug log in the EventRecorder, in the sense -// that this is a simple log. While the debug log contains complete details -// about the event. -func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, - obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { - msg := fmt.Sprintf(messageFmt, args...) - // Log and emit event. - if eventType == corev1.EventTypeWarning { - ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) - } else { - ctrl.LoggerFrom(ctx).Info(msg) - } - r.Eventf(obj, eventType, reason, msg) -} - -// notify emits notification related to the reconciliation. 
-func (r *OCIRepositoryReconciler) notify(ctx context.Context, - oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) { - // Notify successful reconciliation for new artifact and recovery from any - // failure. - if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { - annotations := map[string]string{ - sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision, - sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum, - } - - var oldChecksum string - if oldObj.GetArtifact() != nil { - oldChecksum = oldObj.GetArtifact().Checksum - } - - message := fmt.Sprintf("stored artifact with digest '%s' from '%s'", newObj.Status.Artifact.Revision, newObj.Spec.URL) - - // enrich message with upstream annotations if found - if info := newObj.GetArtifact().Metadata; info != nil { - var source, revision string - if val, ok := info[oci.SourceAnnotation]; ok { - source = val - } - if val, ok := info[oci.RevisionAnnotation]; ok { - revision = val - } - if source != "" && revision != "" { - message = fmt.Sprintf("%s, origin source '%s', origin revision '%s'", message, source, revision) - } - } - - // Notify on new artifact and failure recovery. 
- if oldChecksum != newObj.GetArtifact().Checksum { - r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, - "NewArtifact", message) - ctrl.LoggerFrom(ctx).Info(message) - } else { - if sreconcile.FailureRecovery(oldObj, newObj, ociRepositoryFailConditions) { - r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, - meta.SucceededReason, message) - ctrl.LoggerFrom(ctx).Info(message) - } - } - } -} diff --git a/controllers/ocirepository_controller_test.go b/controllers/ocirepository_controller_test.go deleted file mode 100644 index b72413b1f..000000000 --- a/controllers/ocirepository_controller_test.go +++ /dev/null @@ -1,1599 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package controllers - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/darkowlzz/controller-check/status" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/oci" - "github.com/fluxcd/pkg/runtime/conditions" - "github.com/fluxcd/pkg/runtime/patch" - "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - serror "github.com/fluxcd/source-controller/internal/error" - sreconcile "github.com/fluxcd/source-controller/internal/reconcile" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/crane" - "github.com/google/go-containerregistry/pkg/registry" - gcrv1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/mutate" - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" - "sigs.k8s.io/controller-runtime/pkg/client" - fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func TestOCIRepository_Reconcile(t *testing.T) { - g := NewWithT(t) - - // Registry server with public images - tmpDir := t.TempDir() - regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) - if err != nil { - g.Expect(err).ToNot(HaveOccurred()) - } - - podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, "6.1.4", "6.1.5", "6.1.6") - - tests := []struct { - name string - url string - tag string - semver string - digest string - assertArtifact []artifactFixture - }{ - { - name: "public tag", - url: podinfoVersions["6.1.6"].url, - tag: podinfoVersions["6.1.6"].tag, - digest: podinfoVersions["6.1.6"].digest.Hex, - assertArtifact: []artifactFixture{ - { - expectedPath: "kustomize/deployment.yaml", - expectedChecksum: "6fd625effe6bb805b6a78943ee082a4412e763edb7fcaed6e8fe644d06cbf423", - }, - { - expectedPath: "kustomize/hpa.yaml", - expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", - }, - }, - }, - { - name: "public semver", - url: podinfoVersions["6.1.5"].url, - semver: ">= 6.1 <= 6.1.5", - digest: podinfoVersions["6.1.5"].digest.Hex, - assertArtifact: []artifactFixture{ - { - expectedPath: "kustomize/deployment.yaml", - expectedChecksum: "dce4f5f780a8e8994b06031e5b567bf488ceaaaabd9bd3fc278b4f3bfc8c577b", - }, - { - expectedPath: "kustomize/hpa.yaml", - expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - ns, err := 
testEnv.CreateNamespace(ctx, "ocirepository-reconcile-test") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "ocirepository-reconcile", - Namespace: ns.Name, - }, - Spec: sourcev1.OCIRepositorySpec{ - URL: tt.url, - Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &sourcev1.OCIRepositoryRef{}, - }, - } - - if tt.tag != "" { - obj.Spec.Reference.Tag = tt.tag - } - if tt.semver != "" { - obj.Spec.Reference.SemVer = tt.semver - } - - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - // Wait for the finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return len(obj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for the object to be Ready - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - if !conditions.IsReady(obj) { - return false - } - readyCondition := conditions.Get(obj, meta.ReadyCondition) - return obj.Generation == readyCondition.ObservedGeneration && - obj.Generation == obj.Status.ObservedGeneration - }, timeout).Should(BeTrue()) - - // Check if the revision matches the expected digest - g.Expect(obj.Status.Artifact.Revision).To(Equal(tt.digest)) - - // Check if the metadata matches the expected annotations - g.Expect(obj.Status.Artifact.Metadata[oci.SourceAnnotation]).To(ContainSubstring("podinfo")) - g.Expect(obj.Status.Artifact.Metadata[oci.RevisionAnnotation]).To(ContainSubstring(tt.tag)) - - // Check if the artifact storage path matches the expected file path - localPath := testStorage.LocalPath(*obj.Status.Artifact) - t.Logf("artifact local path: %s", localPath) - - f, err := os.Open(localPath) - g.Expect(err).ToNot(HaveOccurred()) - defer f.Close() - - // create a tmp directory to extract 
artifact - tmp, err := os.MkdirTemp("", "ocirepository-test-") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmp) - - ep, err := untar.Untar(f, tmp) - g.Expect(err).ToNot(HaveOccurred()) - t.Logf("extracted summary: %s", ep) - - for _, af := range tt.assertArtifact { - expectedFile := filepath.Join(tmp, af.expectedPath) - g.Expect(expectedFile).To(BeAnExistingFile()) - - f2, err := os.Open(expectedFile) - g.Expect(err).ToNot(HaveOccurred()) - defer f2.Close() - - h := testStorage.Checksum(f2) - t.Logf("file %q hash: %q", expectedFile, h) - g.Expect(h).To(Equal(af.expectedChecksum)) - } - - // Check if the object status is valid - condns := &status.Conditions{NegativePolarity: ociRepositoryReadyCondition.NegativePolarity} - checker := status.NewChecker(testEnv.Client, condns) - checker.CheckErr(ctx, obj) - - // kstatus client conformance check - u, err := patch.ToUnstructured(obj) - g.Expect(err).ToNot(HaveOccurred()) - res, err := kstatus.Compute(u) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - - // Patch the object with reconcile request annotation. 
- patchHelper, err := patch.NewHelper(obj, testEnv.Client) - g.Expect(err).ToNot(HaveOccurred()) - annotations := map[string]string{ - meta.ReconcileRequestAnnotation: "now", - } - obj.SetAnnotations(annotations) - g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return false - } - return obj.Status.LastHandledReconcileAt == "now" - }, timeout).Should(BeTrue()) - - // Wait for the object to be deleted - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, obj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }) - } -} - -func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { - type secretOptions struct { - username string - password string - includeSA bool - includeSecret bool - } - - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(tlsCA) - - tests := []struct { - name string - url string - registryOpts registryOptions - craneOpts []crane.Option - secretOpts secretOptions - tlsCertSecret *corev1.Secret - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - }{ - { - name: "HTTP without basic auth", - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"), - }, - }, - { - name: "HTTP with basic auth secret", - want: sreconcile.ResultSuccess, - registryOpts: registryOptions{ - withBasicAuth: true, - }, - craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{ - Username: testRegistryUsername, - Password: testRegistryPassword, - }), - }, - secretOpts: secretOptions{ - username: testRegistryUsername, - password: testRegistryPassword, - includeSecret: true, - }, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"), - }, - }, - { - name: "HTTP with serviceaccount", - want: sreconcile.ResultSuccess, - registryOpts: registryOptions{ - withBasicAuth: true, - }, - craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{ - Username: testRegistryUsername, - Password: testRegistryPassword, - }), - }, - secretOpts: secretOptions{ - username: testRegistryUsername, - password: testRegistryPassword, - includeSA: true, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"), - }, - }, - { - name: "HTTP registry - basic auth with missing secret", - want: sreconcile.ResultEmpty, - registryOpts: registryOptions{ - withBasicAuth: true, - }, - wantErr: true, - craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{ - Username: testRegistryUsername, - Password: testRegistryPassword, - }), - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "), - }, - }, - { - name: "HTTP registry - basic auth with invalid secret", - want: sreconcile.ResultEmpty, - wantErr: true, - registryOpts: registryOptions{ - withBasicAuth: true, - }, - craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{ - Username: testRegistryUsername, - Password: testRegistryPassword, - }), - }, - secretOpts: secretOptions{ - username: "wrong-pass", - password: "wrong-pass", - includeSecret: true, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "), - }, - }, - { - name: "HTTP registry - basic auth with invalid serviceaccount", - want: 
sreconcile.ResultEmpty, - wantErr: true, - registryOpts: registryOptions{ - withBasicAuth: true, - }, - craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{ - Username: testRegistryUsername, - Password: testRegistryPassword, - }), - }, - secretOpts: secretOptions{ - username: "wrong-pass", - password: "wrong-pass", - includeSA: true, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "), - }, - }, - { - name: "HTTPS with valid certfile", - want: sreconcile.ResultSuccess, - registryOpts: registryOptions{ - withTLS: true, - }, - craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: pool, - }, - }), - }, - tlsCertSecret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", - }, - Data: map[string][]byte{ - "caFile": tlsCA, - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"), - }, - }, - { - name: "HTTPS without certfile", - want: sreconcile.ResultEmpty, - wantErr: true, - registryOpts: registryOptions{ - withTLS: true, - }, - craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: pool, - }, - }), - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "), - }, - }, - { - name: "HTTPS with invalid certfile", - want: sreconcile.ResultEmpty, - wantErr: true, - registryOpts: registryOptions{ - withTLS: true, - }, - craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: pool, - }, - }), - }, - tlsCertSecret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", - }, - Data: 
map[string][]byte{ - "caFile": []byte("invalid"), - }, - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "auth-strategy-", - }, - Spec: sourcev1.OCIRepositorySpec{ - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - workspaceDir := t.TempDir() - server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts) - - g.Expect(err).NotTo(HaveOccurred()) - - img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...) - g.Expect(err).ToNot(HaveOccurred()) - obj.Spec.URL = img.url - obj.Spec.Reference = &sourcev1.OCIRepositoryRef{ - Tag: img.tag, - } - - if tt.secretOpts.username != "" && tt.secretOpts.password != "" { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "auth-secretref", - }, - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - ".dockerconfigjson": []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`, - server.registryHost, tt.secretOpts.username, tt.secretOpts.password)), - }, - } - - builder.WithObjects(secret) - - if tt.secretOpts.includeSA { - serviceAccount := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "sa-ocitest", - }, - ImagePullSecrets: []corev1.LocalObjectReference{{Name: secret.Name}}, - } - builder.WithObjects(serviceAccount) - obj.Spec.ServiceAccountName = serviceAccount.Name - } - - if tt.secretOpts.includeSecret { - obj.Spec.SecretRef = &meta.LocalObjectReference{ - Name: secret.Name, - } - } - } - - if tt.tlsCertSecret != nil { - builder.WithObjects(tt.tlsCertSecret) - 
obj.Spec.CertSecretRef = &meta.LocalObjectReference{ - Name: tt.tlsCertSecret.Name, - } - } - - r := &OCIRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - opts := r.craneOptions(ctx) - opts = append(opts, crane.WithAuthFromKeychain(authn.DefaultKeychain)) - repoURL, err := r.getArtifactURL(obj, opts) - g.Expect(err).To(BeNil()) - - assertConditions := tt.assertConditions - for k := range assertConditions { - assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", img.digest.Hex) - assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", repoURL) - } - - tmpDir := t.TempDir() - got, err := r.reconcileSource(ctx, obj, &sourcev1.Artifact{}, tmpDir) - if tt.wantErr { - g.Expect(err).ToNot(BeNil()) - } else { - g.Expect(err).To(BeNil()) - } - g.Expect(got).To(Equal(tt.want)) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestOCIRepository_CertSecret(t *testing.T) { - g := NewWithT(t) - - srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err := createTLSServer() - g.Expect(err).ToNot(HaveOccurred()) - - srv.StartTLS() - defer srv.Close() - - transport := &http.Transport{ - TLSClientConfig: &tls.Config{}, - } - // Use the server cert as a CA cert, so the client trusts the - // server cert. (Only works because the server uses the same - // cert in both roles). - pool := x509.NewCertPool() - pool.AddCert(srv.Certificate()) - transport.TLSClientConfig.RootCAs = pool - transport.TLSClientConfig.Certificates = []tls.Certificate{clientTLSCert} - - srv.Client().Transport = transport - pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", srv.URL, []crane.Option{ - crane.WithTransport(srv.Client().Transport), - }...) 
- g.Expect(err).NotTo(HaveOccurred()) - - tlsSecretClientCert := corev1.Secret{ - StringData: map[string]string{ - oci.CACert: string(rootCertPEM), - oci.ClientCert: string(clientCertPEM), - oci.ClientKey: string(clientKeyPEM), - }, - } - - tests := []struct { - name string - url string - digest gcrv1.Hash - certSecret *corev1.Secret - expectreadyconition bool - expectedstatusmessage string - }{ - { - name: "test connection with CACert, Client Cert and Private Key", - url: pi.url, - digest: pi.digest, - certSecret: &tlsSecretClientCert, - expectreadyconition: true, - expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.Hex), - }, - { - name: "test connection with no secret", - url: pi.url, - digest: pi.digest, - expectreadyconition: false, - expectedstatusmessage: "unexpected status code 400 Bad Request: Client sent an HTTP request to an HTTPS server", - }, - { - name: "test connection with with incorrect private key", - url: pi.url, - digest: pi.digest, - certSecret: &corev1.Secret{ - StringData: map[string]string{ - oci.CACert: string(rootCertPEM), - oci.ClientCert: string(clientCertPEM), - oci.ClientKey: string("invalid-key"), - }, - }, - expectreadyconition: false, - expectedstatusmessage: "failed to generate transport for '': tls: failed to find any PEM data in key input", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "ocirepository-test-resource", - Namespace: ns.Name, - }, - Spec: sourcev1.OCIRepositorySpec{ - URL: tt.url, - Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, - }, - } - - if tt.certSecret != nil { - tt.certSecret.ObjectMeta = 
metav1.ObjectMeta{ - GenerateName: "cert-secretref", - Namespace: ns.Name, - } - - g.Expect(testEnv.CreateAndWait(ctx, tt.certSecret)).To(Succeed()) - defer func() { g.Expect(testEnv.Delete(ctx, tt.certSecret)).To(Succeed()) }() - - obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: tt.certSecret.Name} - } - - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - - resultobj := sourcev1.OCIRepository{} - - // Wait for the finalizer to be set - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, &resultobj); err != nil { - return false - } - return len(resultobj.Finalizers) > 0 - }, timeout).Should(BeTrue()) - - // Wait for the object to fail - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, &resultobj); err != nil { - return false - } - readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) - if readyCondition == nil { - return false - } - return obj.Generation == readyCondition.ObservedGeneration && - conditions.IsReady(&resultobj) == tt.expectreadyconition - }, timeout).Should(BeTrue()) - - tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) - - readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) - g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) - - // Wait for the object to be deleted - g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, &resultobj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) - }) - } -} - -func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - - podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, "6.1.4", "6.1.5", "6.1.6") - img6 := 
podinfoVersions["6.1.6"] - img5 := podinfoVersions["6.1.5"] - - tests := []struct { - name string - reference *sourcev1.OCIRepositoryRef - want sreconcile.Result - wantErr bool - wantRevision string - assertConditions []metav1.Condition - }{ - { - name: "no reference (latest tag)", - want: sreconcile.ResultSuccess, - wantRevision: img6.digest.Hex, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - { - name: "tag reference", - reference: &sourcev1.OCIRepositoryRef{ - Tag: "6.1.6", - }, - want: sreconcile.ResultSuccess, - wantRevision: img6.digest.Hex, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - { - name: "semver reference", - reference: &sourcev1.OCIRepositoryRef{ - SemVer: ">= 6.1.5", - }, - want: sreconcile.ResultSuccess, - wantRevision: img6.digest.Hex, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - { - name: "digest reference", - reference: &sourcev1.OCIRepositoryRef{ - Digest: img6.digest.String(), - }, - wantRevision: img6.digest.Hex, - want: sreconcile.ResultSuccess, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - { - name: "invalid tag reference", - reference: &sourcev1.OCIRepositoryRef{ - Tag: "6.1.0", - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact"), - }, - }, - { - name: "invalid semver reference", - reference: &sourcev1.OCIRepositoryRef{ - SemVer: "<= 6.1.0", - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost), - }, - }, - { - name: "invalid digest reference", - reference: &sourcev1.OCIRepositoryRef{ - Digest: "invalid", - }, - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact"), - }, - }, - { - name: "semver should take precedence over tag", - reference: &sourcev1.OCIRepositoryRef{ - SemVer: ">= 6.1.5", - Tag: "6.1.5", - }, - want: sreconcile.ResultSuccess, - wantRevision: img6.digest.Hex, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - { - name: "digest should take precedence over semver", - reference: &sourcev1.OCIRepositoryRef{ - Tag: "6.1.6", - SemVer: ">= 6.1.6", - Digest: img5.digest.String(), - }, - want: sreconcile.ResultSuccess, - wantRevision: img5.digest.Hex, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"), - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), - }, - }, - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - - r := &OCIRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - for 
_, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "checkout-strategy-", - }, - Spec: sourcev1.OCIRepositorySpec{ - URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - if tt.reference != nil { - obj.Spec.Reference = tt.reference - } - - artifact := &sourcev1.Artifact{} - tmpDir := t.TempDir() - got, err := r.reconcileSource(ctx, obj, artifact, tmpDir) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(artifact.Revision).To(Equal(tt.wantRevision)) - } - - g.Expect(got).To(Equal(tt.want)) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - }) - } -} - -func TestOCIRepository_reconcileArtifact(t *testing.T) { - g := NewWithT(t) - - tests := []struct { - name string - targetPath string - artifact *sourcev1.Artifact - beforeFunc func(obj *sourcev1.OCIRepository) - want sreconcile.Result - wantErr bool - assertArtifact *sourcev1.Artifact - assertPaths []string - assertConditions []metav1.Condition - }{ - { - name: "Archiving Artifact creates correct files and condition", - targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ - Revision: "revision", - }, - beforeFunc: func(obj *sourcev1.OCIRepository) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest") - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "latest.tar.gz", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), - }, - }, - { - name: "No status changes if artifact is already present", - artifact: &sourcev1.Artifact{ - Revision: "revision", - }, - targetPath: "testdata/oci/repository", - want: sreconcile.ResultSuccess, - 
beforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: "revision", - } - }, - assertArtifact: &sourcev1.Artifact{ - Revision: "revision", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), - }, - }, - { - name: "target path doesn't exist", - targetPath: "testdata/oci/non-existent", - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path: "), - }, - }, - { - name: "target path is a file", - targetPath: "testdata/oci/repository/foo.txt", - want: sreconcile.ResultEmpty, - wantErr: true, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "source path 'testdata/oci/repository/foo.txt' is not a directory"), - }, - }, - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - - r := &OCIRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "reconcile-artifact-", - }, - } - if tt.beforeFunc != nil { - tt.beforeFunc(obj) - } - - artifact := &sourcev1.Artifact{} - if tt.artifact != nil { - artifact = tt.artifact - } - got, err := r.reconcileArtifact(ctx, obj, artifact, tt.targetPath) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - g.Expect(got).To(Equal(tt.want)) - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - if tt.assertArtifact != nil { - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.artifact)) - } - - for _, path := 
range tt.assertPaths { - localPath := testStorage.LocalPath(*obj.GetArtifact()) - path = filepath.Join(filepath.Dir(localPath), path) - _, err := os.Lstat(path) - g.Expect(err).ToNot(HaveOccurred()) - } - }) - } -} - -func TestOCIRepository_getArtifactURL(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - - imgs, err := pushMultiplePodinfoImages(server.registryHost, "6.1.4", "6.1.5", "6.1.6") - g.Expect(err).ToNot(HaveOccurred()) - - tests := []struct { - name string - url string - reference *sourcev1.OCIRepositoryRef - wantErr bool - want string - }{ - { - name: "valid url with no reference", - url: "oci://ghcr.io/stefanprodan/charts", - want: "ghcr.io/stefanprodan/charts", - }, - { - name: "valid url with tag reference", - url: "oci://ghcr.io/stefanprodan/charts", - reference: &sourcev1.OCIRepositoryRef{ - Tag: "6.1.6", - }, - want: "ghcr.io/stefanprodan/charts:6.1.6", - }, - { - name: "valid url with digest reference", - url: "oci://ghcr.io/stefanprodan/charts", - reference: &sourcev1.OCIRepositoryRef{ - Digest: imgs["6.1.6"].digest.Hex, - }, - want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.Hex, - }, - { - name: "valid url with semver reference", - url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - reference: &sourcev1.OCIRepositoryRef{ - SemVer: ">= 6.1.6", - }, - want: server.registryHost + "/podinfo:6.1.6", - }, - { - name: "invalid url without oci prefix", - url: "ghcr.io/stefanprodan/charts", - wantErr: true, - }, - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - r := &OCIRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "artifact-url-", - }, - Spec: 
sourcev1.OCIRepositorySpec{ - URL: tt.url, - Interval: metav1.Duration{Duration: interval}, - Timeout: &metav1.Duration{Duration: timeout}, - }, - } - - if tt.reference != nil { - obj.Spec.Reference = tt.reference - } - - opts := r.craneOptions(ctx) - opts = append(opts, crane.WithAuthFromKeychain(authn.DefaultKeychain)) - got, err := r.getArtifactURL(obj, opts) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) - }) - } -} - -func TestOCIRepository_stalled(t *testing.T) { - g := NewWithT(t) - - ns, err := testEnv.CreateNamespace(ctx, "ocirepository-stalled-test") - g.Expect(err).ToNot(HaveOccurred()) - defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "ocirepository-reconcile", - Namespace: ns.Name, - }, - Spec: sourcev1.OCIRepositorySpec{ - URL: "oci://ghcr.io/test/test:v1", - Interval: metav1.Duration{Duration: 60 * time.Minute}, - }, - } - - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - - key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := sourcev1.OCIRepository{} - - // Wait for the object to fail - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, &resultobj); err != nil { - return false - } - readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) - if readyCondition == nil { - return false - } - return obj.Generation == readyCondition.ObservedGeneration && - !conditions.IsReady(&resultobj) - }, timeout).Should(BeTrue()) - - // Verify that stalled condition is present in status - stalledCondition := conditions.Get(&resultobj, meta.StalledCondition) - g.Expect(stalledCondition).ToNot(BeNil()) - g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason)) -} - -func TestOCIRepository_reconcileStorage(t *testing.T) { - g := NewWithT(t) - - tests := []struct { - name string - beforeFunc func(obj 
*sourcev1.OCIRepository) error - want sreconcile.Result - wantErr bool - assertConditions []metav1.Condition - assertArtifact *sourcev1.Artifact - assertPaths []string - }{ - { - name: "garbage collects", - beforeFunc: func(obj *sourcev1.OCIRepository) error { - revisions := []string{"a", "b", "c", "d"} - - for n := range revisions { - v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ - Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", v), - Revision: v, - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { - return err - } - - if n != len(revisions)-1 { - time.Sleep(time.Second) - } - } - - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/oci-reconcile-storage/d.txt", - Revision: "d", - Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", - URL: testStorage.Hostname + "/oci-reconcile-storage/d.txt", - Size: int64p(int64(len("d"))), - }, - assertPaths: []string{ - "/oci-reconcile-storage/d.txt", - "/oci-reconcile-storage/c.txt", - "!/oci-reconcile-storage/b.txt", - "!/oci-reconcile-storage/a.txt", - }, - want: sreconcile.ResultSuccess, - }, - { - name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.OCIRepository) error { - obj.Status.Artifact = &sourcev1.Artifact{ - Path: "/oci-reconcile-storage/invalid.txt", - Revision: "e", - } - testStorage.SetArtifactURL(obj.Status.Artifact) - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "!/oci-reconcile-storage/invalid.txt", - }, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), - }, - }, - { - name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.OCIRepository) error { - obj.Status.Artifact = 
&sourcev1.Artifact{ - Path: "/oci-reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: "http://outdated.com/oci-reconcile-storage/hostname.txt", - } - if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { - return err - } - if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { - return err - } - return nil - }, - want: sreconcile.ResultSuccess, - assertPaths: []string{ - "/oci-reconcile-storage/hostname.txt", - }, - assertArtifact: &sourcev1.Artifact{ - Path: "/oci-reconcile-storage/hostname.txt", - Revision: "f", - Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", - URL: testStorage.Hostname + "/oci-reconcile-storage/hostname.txt", - Size: int64p(int64(len("file"))), - }, - }, - } - - builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) - r := &OCIRepositoryReconciler{ - Client: builder.Build(), - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - }, - } - - g.Expect(tt.beforeFunc(obj)).To(Succeed()) - got, err := r.reconcileStorage(ctx, obj, &sourcev1.Artifact{}, "") - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - g.Expect(got).To(Equal(tt.want)) - g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) - if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { - g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) - } - - g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - - for _, p := range tt.assertPaths { - absoluteP := filepath.Join(testStorage.BasePath, p) - if !strings.HasPrefix(p, "!") { - g.Expect(absoluteP).To(BeAnExistingFile()) - continue - } - - 
g.Expect(absoluteP).ToNot(BeAnExistingFile()) - } - }) - } -} - -func TestOCIRepository_ReconcileDelete(t *testing.T) { - g := NewWithT(t) - - r := &OCIRepositoryReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, - } - - obj := &sourcev1.OCIRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: "reconcile-delete-", - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{ - sourcev1.SourceFinalizer, - }, - }, - Status: sourcev1.OCIRepositoryStatus{}, - } - - artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") - obj.Status.Artifact = &artifact - - got, err := r.reconcileDelete(ctx, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(sreconcile.ResultEmpty)) - g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) - g.Expect(obj.Status.Artifact).To(BeNil()) -} - -func TestOCIRepositoryReconciler_notify(t *testing.T) { - - noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason") - noopErr.Ignore = true - - tests := []struct { - name string - res sreconcile.Result - resErr error - oldObjBeforeFunc func(obj *sourcev1.OCIRepository) - newObjBeforeFunc func(obj *sourcev1.OCIRepository) - commit git.Commit - wantEvent string - }{ - { - name: "error - no event", - res: sreconcile.ResultEmpty, - resErr: errors.New("some error"), - }, - { - name: "new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: "xxx", - Checksum: "yyy", - Metadata: map[string]string{ - oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", - oci.RevisionAnnotation: "6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872", - }, - } - }, - wantEvent: "Normal NewArtifact stored artifact with digest 'xxx' from 'oci://newurl.io', origin source 
'https://github.com/stefanprodan/podinfo', origin revision '6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872'", - }, - { - name: "recovery from failure", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal Succeeded stored artifact with digest 'xxx' from 'oci://newurl.io'", - }, - { - name: "recovery and new artifact", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") - conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") - }, - newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - wantEvent: "Normal NewArtifact stored artifact with digest 'aaa' from 'oci://newurl.io'", - }, - { - name: "no updates", - res: sreconcile.ResultSuccess, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = 
&sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") - }, - }, - { - name: "no updates on requeue", - res: sreconcile.ResultRequeue, - resErr: nil, - oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - recorder := record.NewFakeRecorder(32) - - oldObj := &sourcev1.OCIRepository{} - newObj := oldObj.DeepCopy() - - if tt.oldObjBeforeFunc != nil { - tt.oldObjBeforeFunc(oldObj) - } - if tt.newObjBeforeFunc != nil { - tt.newObjBeforeFunc(newObj) - } - - reconciler := &OCIRepositoryReconciler{ - EventRecorder: recorder, - } - reconciler.notify(ctx, oldObj, newObj, tt.res, tt.resErr) - - select { - case x, ok := <-recorder.Events: - g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") - if tt.wantEvent != "" { - g.Expect(x).To(ContainSubstring(tt.wantEvent)) - } - default: - if tt.wantEvent != "" { - t.Errorf("expected some event to be emitted") - } - } - }) - } -} - -type artifactFixture struct { - expectedPath string - expectedChecksum string -} - -type podinfoImage struct { - url string - tag string - digest gcrv1.Hash -} - -func createPodinfoImageFromTar(tarFileName, tag, registryURL string, opts ...crane.Option) (*podinfoImage, error) { - // Create Image - image, err := crane.Load(path.Join("testdata", "podinfo", tarFileName)) - if err != nil { - return nil, err - } - - image = setPodinfoImageAnnotations(image, tag) - - // url.Parse doesn't handle urls with no scheme well e.g localhost: - if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { - registryURL = fmt.Sprintf("http://%s", registryURL) - } - - myURL, err := url.Parse(registryURL) - if err != nil { - 
return nil, err - } - repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) - - // Image digest - podinfoImageDigest, err := image.Digest() - if err != nil { - return nil, err - } - - // Push image - err = crane.Push(image, repositoryURL, opts...) - if err != nil { - return nil, err - } - - // Tag the image - err = crane.Tag(repositoryURL, tag, opts...) - if err != nil { - return nil, err - } - - return &podinfoImage{ - url: "oci://" + repositoryURL, - tag: tag, - digest: podinfoImageDigest, - }, nil -} - -func pushMultiplePodinfoImages(serverURL string, versions ...string) (map[string]podinfoImage, error) { - podinfoVersions := make(map[string]podinfoImage) - - for i := 0; i < len(versions); i++ { - pi, err := createPodinfoImageFromTar(fmt.Sprintf("podinfo-%s.tar", versions[i]), versions[i], serverURL) - if err != nil { - return nil, err - } - - podinfoVersions[versions[i]] = *pi - - } - - return podinfoVersions, nil -} - -func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { - metadata := map[string]string{ - oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", - oci.RevisionAnnotation: fmt.Sprintf("%s/SHA", tag), - } - return mutate.Annotations(img, metadata).(gcrv1.Image) -} - -// These two taken verbatim from https://ericchiang.github.io/post/go-tls/ -func certTemplate() (*x509.Certificate, error) { - // generate a random serial number (a real cert authority would - // have some logic behind this) - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, errors.New("failed to generate serial number: " + err.Error()) - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"Flux project"}}, - SignatureAlgorithm: x509.SHA256WithRSA, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), // valid for an hour - BasicConstraintsValid: true, - } - return &tmpl, 
nil -} - -func createCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) ( - cert *x509.Certificate, certPEM []byte, err error) { - - certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv) - if err != nil { - return - } - // parse the resulting certificate so we can use it again - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return - } - // PEM encode the certificate (this is a standard TLS encoding) - b := pem.Block{Type: "CERTIFICATE", Bytes: certDER} - certPEM = pem.EncodeToMemory(&b) - return -} - -func createTLSServer() (*httptest.Server, []byte, []byte, []byte, tls.Certificate, error) { - var clientTLSCert tls.Certificate - var rootCertPEM, clientCertPEM, clientKeyPEM []byte - - srv := httptest.NewUnstartedServer(registry.New()) - - // Create a self-signed cert to use as the CA and server cert. - rootKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - rootCertTmpl, err := certTemplate() - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - rootCertTmpl.IsCA = true - rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature - rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} - rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")} - var rootCert *x509.Certificate - rootCert, rootCertPEM, err = createCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey) - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - - rootKeyPEM := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rootKey), - }) - - // Create a TLS cert using the private key and certificate. 
- rootTLSCert, err := tls.X509KeyPair(rootCertPEM, rootKeyPEM) - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - - // To trust a client certificate, the server must be given a - // CA cert pool. - pool := x509.NewCertPool() - pool.AddCert(rootCert) - - srv.TLS = &tls.Config{ - ClientAuth: tls.RequireAndVerifyClientCert, - Certificates: []tls.Certificate{rootTLSCert}, - ClientCAs: pool, - } - - // Create a client cert, signed by the "CA". - clientKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - clientCertTmpl, err := certTemplate() - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature - clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - _, clientCertPEM, err = createCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey) - if err != nil { - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err - } - // Encode and load the cert and private key for the client. - clientKeyPEM = pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey), - }) - clientTLSCert, err = tls.X509KeyPair(clientCertPEM, clientKeyPEM) - return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err -} diff --git a/controllers/storage.go b/controllers/storage.go deleted file mode 100644 index c5fd586f0..000000000 --- a/controllers/storage.go +++ /dev/null @@ -1,649 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "archive/tar" - "compress/gzip" - "context" - "crypto/sha256" - "fmt" - "hash" - "io" - "io/fs" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "time" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/fluxcd/pkg/lockedfile" - "github.com/fluxcd/pkg/untar" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kerrors "k8s.io/apimachinery/pkg/util/errors" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - sourcefs "github.com/fluxcd/source-controller/internal/fs" - "github.com/fluxcd/source-controller/pkg/sourceignore" -) - -const GarbageCountLimit = 1000 - -// Storage manages artifacts -type Storage struct { - // BasePath is the local directory path where the source artifacts are stored. - BasePath string `json:"basePath"` - - // Hostname is the file server host name used to compose the artifacts URIs. - Hostname string `json:"hostname"` - - // ArtifactRetentionTTL is the duration of time that artifacts will be kept - // in storage before being garbage collected. - ArtifactRetentionTTL time.Duration `json:"artifactRetentionTTL"` - - // ArtifactRetentionRecords is the maximum number of artifacts to be kept in - // storage after a garbage collection. - ArtifactRetentionRecords int `json:"artifactRetentionRecords"` -} - -// NewStorage creates the storage helper for a given path and hostname. 
-func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Duration, artifactRetentionRecords int) (*Storage, error) { - if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { - return nil, fmt.Errorf("invalid dir path: %s", basePath) - } - return &Storage{ - BasePath: basePath, - Hostname: hostname, - ArtifactRetentionTTL: artifactRetentionTTL, - ArtifactRetentionRecords: artifactRetentionRecords, - }, nil -} - -// NewArtifactFor returns a new v1beta1.Artifact. -func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact { - path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName) - artifact := sourcev1.Artifact{ - Path: path, - Revision: revision, - } - s.SetArtifactURL(&artifact) - return artifact -} - -// SetArtifactURL sets the URL on the given v1beta1.Artifact. -func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { - if artifact.Path == "" { - return - } - format := "http://%s/%s" - if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { - format = "%s/%s" - } - artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) -} - -// SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. -func (s Storage) SetHostname(URL string) string { - u, err := url.Parse(URL) - if err != nil { - return "" - } - u.Host = s.Hostname - return u.String() -} - -// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir. -func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error { - dir := filepath.Dir(s.LocalPath(artifact)) - return os.MkdirAll(dir, 0o700) -} - -// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir. -func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) { - var deletedDir string - dir := filepath.Dir(s.LocalPath(artifact)) - // Check if the dir exists. 
- _, err := os.Stat(dir) - if err == nil { - deletedDir = dir - } - return deletedDir, os.RemoveAll(dir) -} - -// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one. -func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) { - deletedFiles := []string{} - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - var errors []string - _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - - if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { - if err := os.Remove(path); err != nil { - errors = append(errors, info.Name()) - } else { - // Collect the successfully deleted file paths. - deletedFiles = append(deletedFiles, path) - } - } - return nil - }) - - if len(errors) > 0 { - return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) - } - return deletedFiles, nil -} - -// getGarbageFiles returns all files that need to be garbage collected for the given artifact. -// Garbage files are determined based on the below flow: -// 1. collect all files with an expired ttl -// 2. if we satisfy maxItemsToBeRetained, then return -// 3. else, remove all files till the latest n files remain, where n=maxItemsToBeRetained -func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) ([]string, error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - garbageFiles := []string{} - filesWithCreatedTs := make(map[time.Time]string) - // sortedPaths contain all files sorted according to their created ts. 
- sortedPaths := []string{} - now := time.Now().UTC() - totalFiles := 0 - var errors []string - creationTimestamps := []time.Time{} - _ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - if totalFiles >= totalCountLimit { - return fmt.Errorf("reached file walking limit, already walked over: %d", totalFiles) - } - info, err := d.Info() - if err != nil { - errors = append(errors, err.Error()) - return nil - } - createdAt := info.ModTime().UTC() - diff := now.Sub(createdAt) - // Compare the time difference between now and the time at which the file was created - // with the provided TTL. Delete if the difference is greater than the TTL. - expired := diff > ttl - if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { - if path != localPath && expired { - garbageFiles = append(garbageFiles, path) - } - totalFiles += 1 - filesWithCreatedTs[createdAt] = path - creationTimestamps = append(creationTimestamps, createdAt) - } - return nil - - }) - if len(errors) > 0 { - return nil, fmt.Errorf("can't walk over file: %s", strings.Join(errors, ",")) - } - - // We already collected enough garbage files to satisfy the no. of max - // items that are supposed to be retained, so exit early. - if totalFiles-len(garbageFiles) < maxItemsToBeRetained { - return garbageFiles, nil - } - - // sort all timestamps in an ascending order. 
- sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) }) - for _, ts := range creationTimestamps { - path, ok := filesWithCreatedTs[ts] - if !ok { - return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts) - } - sortedPaths = append(sortedPaths, path) - } - - var collected int - noOfGarbageFiles := len(garbageFiles) - for _, path := range sortedPaths { - if path != localPath && !stringInSlice(path, garbageFiles) { - // If we previously collected a few garbage files with an expired ttl, then take that into account - // when checking whether we need to remove more files to satisfy the max no. of items allowed - // in the filesystem, along with the no. of files already removed in this loop. - if noOfGarbageFiles > 0 { - if (len(sortedPaths) - collected - len(garbageFiles)) > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } else { - if len(sortedPaths)-collected > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } - } - } - - return garbageFiles, nil -} - -// GarbageCollect removes all garabge files in the artifact dir according to the provided -// retention options. -func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) { - delFilesChan := make(chan []string) - errChan := make(chan error) - // Abort if it takes more than the provided timeout duration. 
- ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - go func() { - garbageFiles, err := s.getGarbageFiles(artifact, GarbageCountLimit, s.ArtifactRetentionRecords, s.ArtifactRetentionTTL) - if err != nil { - errChan <- err - return - } - var errors []error - var deleted []string - if len(garbageFiles) > 0 { - for _, file := range garbageFiles { - err := os.Remove(file) - if err != nil { - errors = append(errors, err) - } else { - deleted = append(deleted, file) - } - } - } - if len(errors) > 0 { - errChan <- kerrors.NewAggregate(errors) - return - } - delFilesChan <- deleted - }() - - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case delFiles := <-delFilesChan: - return delFiles, nil - case err := <-errChan: - return nil, err - } - } -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file. -func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool { - fi, err := os.Lstat(s.LocalPath(artifact)) - if err != nil { - return false - } - return fi.Mode().IsRegular() -} - -// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path -// and/or os.FileInfo. -type ArchiveFileFilter func(p string, fi os.FileInfo) bool - -// SourceIgnoreFilter returns an ArchiveFileFilter that filters out files matching sourceignore.VCSPatterns and any of -// the provided patterns. -// If an empty gitignore.Pattern slice is given, the matcher is set to sourceignore.NewDefaultMatcher. -func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilter { - matcher := sourceignore.NewDefaultMatcher(ps, domain) - if len(ps) > 0 { - ps = append(sourceignore.VCSPatterns(domain), ps...) 
- matcher = sourceignore.NewMatcher(ps) - } - return func(p string, fi os.FileInfo) bool { - return matcher.Match(strings.Split(p, string(filepath.Separator)), fi.IsDir()) - } -} - -// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding -// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example, -// the user and group name) is stripped from file headers. -// If successful, it sets the checksum and last update time on the artifact. -func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) { - if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() { - return fmt.Errorf("invalid dir path: %s", dir) - } - - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tmpName := tf.Name() - defer func() { - if err != nil { - os.Remove(tmpName) - } - }() - - h := newHash() - sz := &writeCounter{} - mw := io.MultiWriter(h, tf, sz) - - gw := gzip.NewWriter(mw) - tw := tar.NewWriter(gw) - if err := filepath.Walk(dir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - // Ignore anything that is not a file or directories e.g. symlinks - if m := fi.Mode(); !(m.IsRegular() || m.IsDir()) { - return nil - } - - // Skip filtered files - if filter != nil && filter(p, fi) { - return nil - } - - header, err := tar.FileInfoHeader(fi, p) - if err != nil { - return err - } - // The name needs to be modified to maintain directory structure - // as tar.FileInfoHeader only has access to the base name of the file. 
- // Ref: https://golang.org/src/archive/tar/common.go?#L626 - relFilePath := p - if filepath.IsAbs(dir) { - relFilePath, err = filepath.Rel(dir, p) - if err != nil { - return err - } - } - header.Name = relFilePath - - // We want to remove any environment specific data as well, this - // ensures the checksum is purely content based. - header.Gid = 0 - header.Uid = 0 - header.Uname = "" - header.Gname = "" - header.ModTime = time.Time{} - header.AccessTime = time.Time{} - header.ChangeTime = time.Time{} - - if err := tw.WriteHeader(header); err != nil { - return err - } - - if !fi.Mode().IsRegular() { - return nil - } - f, err := os.Open(p) - if err != nil { - f.Close() - return err - } - if _, err := io.Copy(tw, f); err != nil { - f.Close() - return err - } - return f.Close() - }); err != nil { - tw.Close() - gw.Close() - tf.Close() - return err - } - - if err := tw.Close(); err != nil { - gw.Close() - tf.Close() - return err - } - if err := gw.Close(); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tmpName, 0o600); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tmpName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path. -// If successful, it sets the checksum and last update time on the artifact. 
-func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - h := newHash() - sz := &writeCounter{} - mw := io.MultiWriter(h, tf, sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tfName, mode); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path. -// If successful, it sets the checksum and last update time on the artifact. -func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - h := newHash() - sz := &writeCounter{} - mw := io.MultiWriter(h, tf, sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact. -// If successful, the checksum and last update time on the artifact is set. 
-func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) { - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { - if cerr := f.Close(); cerr != nil && err == nil { - err = cerr - } - }() - err = s.Copy(artifact, f) - return err -} - -// CopyToPath copies the contents in the (sub)path of the given artifact to the given path. -func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error { - // create a tmp directory to store artifact - tmp, err := os.MkdirTemp("", "flux-include-") - if err != nil { - return err - } - defer os.RemoveAll(tmp) - - // read artifact file content - localPath := s.LocalPath(*artifact) - f, err := os.Open(localPath) - if err != nil { - return err - } - defer f.Close() - - // untar the artifact - untarPath := filepath.Join(tmp, "unpack") - if _, err = untar.Untar(f, untarPath); err != nil { - return err - } - - // create the destination parent dir - if err = os.MkdirAll(filepath.Dir(toPath), os.ModePerm); err != nil { - return err - } - - // copy the artifact content to the destination dir - fromPath, err := securejoin.SecureJoin(untarPath, subPath) - if err != nil { - return err - } - if err := sourcefs.RenameWithFallback(fromPath, toPath); err != nil { - return err - } - return nil -} - -// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink. 
-func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - link := filepath.Join(dir, linkName) - tmpLink := link + ".tmp" - - if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) { - return "", err - } - - if err := os.Symlink(localPath, tmpLink); err != nil { - return "", err - } - - if err := os.Rename(tmpLink, link); err != nil { - return "", err - } - - url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)) - return url, nil -} - -// Checksum returns the SHA256 checksum for the data of the given io.Reader as a string. -func (s *Storage) Checksum(reader io.Reader) string { - h := newHash() - _, _ = io.Copy(h, reader) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// Lock creates a file lock for the given v1beta1.Artifact. -func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) { - lockFile := s.LocalPath(artifact) + ".lock" - mutex := lockedfile.MutexAt(lockFile) - return mutex.Lock() -} - -// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath). -func (s *Storage) LocalPath(artifact sourcev1.Artifact) string { - if artifact.Path == "" { - return "" - } - path, err := securejoin.SecureJoin(s.BasePath, artifact.Path) - if err != nil { - return "" - } - return path -} - -// newHash returns a new SHA256 hash. -func newHash() hash.Hash { - return sha256.New() -} - -// writecounter is an implementation of io.Writer that only records the number -// of bytes written. 
-type writeCounter struct { - written int64 -} - -func (wc *writeCounter) Write(p []byte) (int, error) { - n := len(p) - wc.written += int64(n) - return n, nil -} diff --git a/controllers/storage_test.go b/controllers/storage_test.go deleted file mode 100644 index 8e0e599a6..000000000 --- a/controllers/storage_test.go +++ /dev/null @@ -1,660 +0,0 @@ -/* -Copyright 2020, 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "archive/tar" - "compress/gzip" - "context" - "fmt" - "io" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - . 
"github.com/onsi/gomega" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" -) - -func TestStorageConstructor(t *testing.T) { - dir := t.TempDir() - - if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil { - t.Fatal("nonexistent path was allowable in storage constructor") - } - - f, err := os.CreateTemp(dir, "") - if err != nil { - t.Fatalf("while creating temporary file: %v", err) - } - f.Close() - - if _, err := NewStorage(f.Name(), "hostname", time.Minute, 2); err == nil { - os.Remove(f.Name()) - t.Fatal("file path was accepted as basedir") - } - os.Remove(f.Name()) - - if _, err := NewStorage(dir, "hostname", time.Minute, 2); err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } -} - -// walks a tar.gz and looks for paths with the basename. It does not match -// symlinks properly at this time because that's painful. -func walkTar(tarFile string, match string, dir bool) (int64, bool, error) { - f, err := os.Open(tarFile) - if err != nil { - return 0, false, fmt.Errorf("could not open file: %w", err) - } - defer f.Close() - - gzr, err := gzip.NewReader(f) - if err != nil { - return 0, false, fmt.Errorf("could not unzip file: %w", err) - } - defer gzr.Close() - - tr := tar.NewReader(gzr) - for { - header, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return 0, false, fmt.Errorf("corrupt tarball reading header: %w", err) - } - - switch header.Typeflag { - case tar.TypeDir: - if header.Name == match && dir { - return 0, true, nil - } - case tar.TypeReg: - if header.Name == match { - return header.Size, true, nil - } - default: - // skip - } - } - - return 0, false, nil -} - -func TestStorage_Archive(t *testing.T) { - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - createFiles := func(files map[string][]byte) (dir string, err error) { 
- dir = t.TempDir() - for name, b := range files { - absPath := filepath.Join(dir, name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(b); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - } - return - } - - matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string][]byte, dirs []string) { - t.Helper() - for name, b := range files { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - s, exist, err := walkTar(storage.LocalPath(artifact), name, false) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if bs := int64(len(b)); s != bs { - t.Fatalf("%q size %v != %v", name, s, bs) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find file %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - } - for _, name := range dirs { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - _, exist, err := walkTar(storage.LocalPath(artifact), name, true) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find dir %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - } - } - - tests := []struct { - name string - files map[string][]byte - filter ArchiveFileFilter - want map[string][]byte - wantDirs []string - wantErr bool - }{ - { - name: "no filter", - files: map[string][]byte{ - ".git/config": nil, - "file.jpg": []byte(`contents`), - "manifest.yaml": nil, - }, - filter: nil, - want: map[string][]byte{ - ".git/config": nil, - "file.jpg": []byte(`contents`), - "manifest.yaml": nil, - }, - }, - { - name: "exclude VCS", - files: 
map[string][]byte{ - ".git/config": nil, - "manifest.yaml": nil, - }, - wantDirs: []string{ - "!.git", - }, - filter: SourceIgnoreFilter(nil, nil), - want: map[string][]byte{ - "!.git/config": nil, - "manifest.yaml": nil, - }, - }, - { - name: "custom", - files: map[string][]byte{ - ".git/config": nil, - "custom": nil, - "horse.jpg": nil, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - want: map[string][]byte{ - "!git/config": nil, - "!custom": nil, - "horse.jpg": nil, - }, - wantErr: false, - }, - { - name: "including directories", - files: map[string][]byte{ - "test/.gitkeep": nil, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - wantDirs: []string{ - "test", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dir, err := createFiles(tt.files) - if err != nil { - t.Error(err) - return - } - defer os.RemoveAll(dir) - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)+".tar.gz"), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.Archive(&artifact, dir, tt.filter); (err != nil) != tt.wantErr { - t.Errorf("Archive() error = %v, wantErr %v", err, tt.wantErr) - } - matchFiles(t, storage, artifact, tt.want, tt.wantDirs) - }) - } -} - -func TestStorageRemoveAllButCurrent(t *testing.T) { - t.Run("bad directory in archive", func(t *testing.T) { - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } - - if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { - t.Fatal("Did not error while pruning non-existent path") - } - }) - - t.Run("collect names of deleted items", func(t *testing.T) { - g 
:= NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: path.Join("foo", "bar", "artifact1.tar.gz"), - } - - // Create artifact dir and artifacts. - artifactDir := path.Join(dir, "foo", "bar") - g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred()) - current := []string{ - path.Join(artifactDir, "artifact1.tar.gz"), - } - wantDeleted := []string{ - path.Join(artifactDir, "file1.txt"), - path.Join(artifactDir, "file2.txt"), - } - createFile := func(files []string) { - for _, c := range files { - f, err := os.Create(c) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - } - } - createFile(current) - createFile(wantDeleted) - _, err = s.Symlink(artifact, "latest.tar.gz") - g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink") - - deleted, err := s.RemoveAllButCurrent(artifact) - g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current") - g.Expect(deleted).To(Equal(wantDeleted)) - }) -} - -func TestStorageRemoveAll(t *testing.T) { - tests := []struct { - name string - artifactPath string - createArtifactPath bool - wantDeleted string - }{ - { - name: "delete non-existent path", - artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: false, - wantDeleted: "", - }, - { - name: "delete existing path", - artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: true, - wantDeleted: path.Join("foo", "bar"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPath, - } - - if tt.createArtifactPath { - g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 
0o750)).ToNot(HaveOccurred()) - } - - deleted, err := s.RemoveAll(artifact) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path") - }) - } -} - -func TestStorageCopyFromPath(t *testing.T) { - type File struct { - Name string - Content []byte - } - - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - createFile := func(file *File) (absPath string, err error) { - dir = t.TempDir() - absPath = filepath.Join(dir, file.Name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(file.Content); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - return - } - - matchFile := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, file *File, expectMismatch bool) { - c, err := os.ReadFile(storage.LocalPath(artifact)) - if err != nil { - t.Fatalf("failed reading file: %v", err) - } - if (string(c) != string(file.Content)) != expectMismatch { - t.Errorf("artifact content does not match and not expecting mismatch, got: %q, want: %q", string(c), string(file.Content)) - } - } - - tests := []struct { - name string - file *File - want *File - expectMismatch bool - }{ - { - name: "content match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - }, - { - name: "content not match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: []byte(`mismatch contents`), - }, - expectMismatch: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - absPath, 
err := createFile(tt.file) - if err != nil { - t.Error(err) - return - } - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.CopyFromPath(&artifact, absPath); err != nil { - t.Errorf("CopyFromPath() error = %v", err) - } - matchFile(t, storage, artifact, tt.want, tt.expectMismatch) - }) - } -} - -func TestStorage_getGarbageFiles(t *testing.T) { - artifactFolder := path.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - createPause time.Duration - ttl time.Duration - maxItemsToBeRetained int - totalCountLimit int - wantDeleted []string - }{ - { - name: "delete files based on maxItemsToBeRetained", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - path.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Millisecond * 10, - ttl: time.Minute * 2, - totalCountLimit: 10, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - { - name: "delete files based on ttl", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - path.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*3 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - 
name: "delete files based on ttl and maxItemsToBeRetained", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - path.Join(artifactFolder, "artifact5.tar.gz"), - path.Join(artifactFolder, "artifact6.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*5 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - path.Join(artifactFolder, "artifact5.tar.gz"), - path.Join(artifactFolder, "artifact6.tar.gz"), - }, - createPause: time.Millisecond * 500, - ttl: time.Millisecond * 500, - totalCountLimit: 3, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) - for _, artifactPath := range tt.artifactPaths { - f, err := os.Create(path.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - 
time.Sleep(tt.createPause) - } - - deletedPaths, err := s.getGarbageFiles(artifact, tt.totalCountLimit, tt.maxItemsToBeRetained, tt.ttl) - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths))) - for _, wantDeletedPath := range tt.wantDeleted { - present := false - for _, deletedPath := range deletedPaths { - if strings.Contains(deletedPath, wantDeletedPath) { - present = true - break - } - } - if !present { - g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath)) - } - } - }) - } -} - -func TestStorage_GarbageCollect(t *testing.T) { - artifactFolder := path.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - wantDeleted []string - wantErr string - ctxTimeout time.Duration - }{ - { - name: "garbage collects", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantDeleted: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - }, - ctxTimeout: time.Second * 1, - }, - { - name: "garbage collection fails with context timeout", - artifactPaths: []string{ - path.Join(artifactFolder, "artifact1.tar.gz"), - path.Join(artifactFolder, "artifact2.tar.gz"), - path.Join(artifactFolder, "artifact3.tar.gz"), - path.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantErr: "context deadline exceeded", - ctxTimeout: time.Nanosecond * 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Second*2, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(path.Join(dir, 
artifactFolder), 0o750)).ToNot(HaveOccurred()) - for i, artifactPath := range tt.artifactPaths { - f, err := os.Create(path.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - if i != len(tt.artifactPaths)-1 { - time.Sleep(time.Second * 1) - } - } - - deletedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout) - if tt.wantErr == "" { - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - } - if len(tt.wantDeleted) > 0 { - g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths))) - for _, wantDeletedPath := range tt.wantDeleted { - present := false - for _, deletedPath := range deletedPaths { - if strings.Contains(deletedPath, wantDeletedPath) { - g.Expect(deletedPath).ToNot(BeAnExistingFile()) - present = true - break - } - } - if present == false { - g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath)) - } - } - } - }) - } -} diff --git a/controllers/testdata/certs/ca-key.pem b/controllers/testdata/certs/ca-key.pem deleted file mode 100644 index b69de5ab5..000000000 --- a/controllers/testdata/certs/ca-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49 -AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H -dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA== ------END EC PRIVATE KEY----- diff --git a/controllers/testdata/certs/ca.csr b/controllers/testdata/certs/ca.csr deleted file mode 100644 index baa8aeb26..000000000 --- a/controllers/testdata/certs/ca.csr +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x 
-PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt -cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU -EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz -b34Wow== ------END CERTIFICATE REQUEST----- diff --git a/controllers/testdata/certs/ca.pem b/controllers/testdata/certs/ca.pem deleted file mode 100644 index 080bd24e6..000000000 --- a/controllers/testdata/certs/ca.pem +++ /dev/null @@ -1,11 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw -NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE -AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ -4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O -1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb -OD8EjjCMY69RMO0= ------END CERTIFICATE----- diff --git a/controllers/testdata/certs/server-key.pem b/controllers/testdata/certs/server-key.pem deleted file mode 100644 index 5054ff39f..000000000 --- a/controllers/testdata/certs/server-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49 -AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3 -fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg== ------END EC PRIVATE KEY----- diff --git a/controllers/testdata/certs/server.csr b/controllers/testdata/certs/server.csr deleted file mode 100644 index 5caf7b39c..000000000 --- a/controllers/testdata/certs/server.csr +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6 
-MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB -Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV ------END CERTIFICATE REQUEST----- diff --git a/controllers/testdata/certs/server.pem b/controllers/testdata/certs/server.pem deleted file mode 100644 index 11c655a0b..000000000 --- a/controllers/testdata/certs/server.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw -NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD -AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB -5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb -bdNmUCzAvVuCAKuMjg2OPrE= ------END CERTIFICATE----- diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md new file mode 100644 index 000000000..935d74275 --- /dev/null +++ b/docs/api/v1/source.md @@ -0,0 +1,3724 @@ +

Source API reference v1

+

Packages:

+ +

source.toolkit.fluxcd.io/v1

+

Package v1 contains API Schema definitions for the source v1 API group

+Resource Types: + +

Bucket +

+

Bucket is the Schema for the buckets API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+Bucket +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +BucketSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+
+status
+ + +BucketStatus + + +
+
+
+
+

GitRepository +

+

GitRepository is the Schema for the gitrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+GitRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +GitRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+status
+ + +GitRepositoryStatus + + +
+
+
+
+

HelmChart +

+

HelmChart is the Schema for the helmcharts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmChart +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmChartSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+status
+ + +HelmChartStatus + + +
+
+
+
+

HelmRepository +

+

HelmRepository is the Schema for the helmrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+status
+ + +HelmRepositoryStatus + + +
+
+
+
+

OCIRepository +

+

OCIRepository is the Schema for the ocirepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+OCIRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +OCIRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+status
+ + +OCIRepositoryStatus + + +
+
+
+
+

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+
+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec specifies the required configuration to produce an Artifact for +an object storage bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus records the observed state of a Bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the Bucket object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful Bucket reconciliation.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

ExternalArtifact +

+

ExternalArtifact is the Schema for the external artifacts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +ExternalArtifactSpec + + +
+
+
+ + + + + +
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+status
+ + +ExternalArtifactStatus + + +
+
+
+
+

ExternalArtifactSpec +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactSpec defines the desired state of ExternalArtifact

+
+
+ + + + + + + + + + + + + +
FieldDescription
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+
+

ExternalArtifactStatus +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactStatus defines the observed state of ExternalArtifact

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of an ExternalArtifact reconciliation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the ExternalArtifact.

+
+
+
+

GitRepositoryInclude +

+

+(Appears on: +GitRepositorySpec, +GitRepositoryStatus) +

+

GitRepositoryInclude specifies a local reference to a GitRepository which +Artifact (sub-)contents must be included, and where they should be placed.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+repository
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

GitRepositoryRef specifies the GitRepository which Artifact contents +must be included.

+
+fromPath
+ +string + +
+(Optional) +

FromPath specifies the path to copy contents from, defaults to the root +of the Artifact.

+
+toPath
+ +string + +
+(Optional) +

ToPath specifies the path to copy contents to, defaults to the name of +the GitRepositoryRef.

+
+
+
+

GitRepositoryRef +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryRef specifies the Git reference to resolve and checkout.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+branch
+ +string + +
+(Optional) +

Branch to check out, defaults to ‘master’ if no other field is defined.

+
+tag
+ +string + +
+(Optional) +

Tag to check out, takes precedence over Branch.

+
+semver
+ +string + +
+(Optional) +

SemVer tag expression to check out, takes precedence over Tag.

+
+name
+ +string + +
+(Optional) +

Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

+

It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description +Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

+
+commit
+ +string + +
+(Optional) +

Commit SHA to check out, takes precedence over all reference fields.

+

This can be combined with Branch to shallow clone the branch, in which +the commit is expected to exist.

+
+
+
+

GitRepositorySpec +

+

+(Appears on: +GitRepository) +

+

GitRepositorySpec specifies the required configuration to produce an +Artifact for a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+
+

GitRepositoryStatus +

+

+(Appears on: +GitRepository) +

+

GitRepositoryStatus records the observed state of a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the GitRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the GitRepository.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful GitRepository reconciliation.

+
+includedArtifacts
+ + +[]github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

IncludedArtifacts contains a list of the last successfully included +Artifacts as instructed by GitRepositorySpec.Include.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedRecurseSubmodules
+ +bool + +
+(Optional) +

ObservedRecurseSubmodules is the observed resource submodules +configuration used to produce the current Artifact.

+
+observedInclude
+ + +[]GitRepositoryInclude + + +
+(Optional) +

ObservedInclude is the observed list of GitRepository resources used to +produce the current Artifact.

+
+observedSparseCheckout
+ +[]string + +
+(Optional) +

ObservedSparseCheckout is the observed list of directories used to +produce the current Artifact.

+
+sourceVerificationMode
+ + +GitVerificationMode + + +
+(Optional) +

SourceVerificationMode is the last used verification mode indicating +which Git object(s) have been verified.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

GitRepositoryVerification +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryVerification specifies the Git commit signature verification +strategy.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mode
+ + +GitVerificationMode + + +
+(Optional) +

Mode specifies which Git object(s) should be verified.

+

The variants “head” and “HEAD” both imply the same thing, i.e. verify +the commit that the HEAD of the Git repository points to. The variant +“head” solely exists to ensure backwards compatibility.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

SecretRef specifies the Secret containing the public keys of trusted Git +authors.

+
+
+
+

GitVerificationMode +(string alias)

+

+(Appears on: +GitRepositoryStatus, +GitRepositoryVerification) +

+

GitVerificationMode specifies the verification mode for a Git repository.

+

HelmChartSpec +

+

+(Appears on: +HelmChart) +

+

HelmChartSpec specifies the desired state of a Helm chart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+
+

HelmChartStatus +

+

+(Appears on: +HelmChart) +

+

HelmChartStatus records the observed state of the HelmChart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmChart +object.

+
+observedSourceArtifactRevision
+ +string + +
+(Optional) +

ObservedSourceArtifactRevision is the last observed Artifact.Revision +of the HelmChartSpec.SourceRef.

+
+observedChartName
+ +string + +
+(Optional) +

ObservedChartName is the last observed chart name as specified by the +resolved chart reference.

+
+observedValuesFiles
+ +[]string + +
+(Optional) +

ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmChart.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

HelmRepositorySpec +

+

+(Appears on: +HelmRepository) +

+

HelmRepositorySpec specifies the required configuration to produce an +Artifact for a Helm repository index YAML.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+
+

HelmRepositoryStatus +

+

+(Appears on: +HelmRepository) +

+

HelmRepositoryStatus records the observed state of the HelmRepository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmRepository.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +HelmRepositoryStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful HelmRepository reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

LocalHelmChartSourceReference +

+

+(Appears on: +HelmChartSpec) +

+

LocalHelmChartSourceReference contains enough information to let you locate +the typed referenced object at namespace level.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+ +string + +
+(Optional) +

APIVersion of the referent.

+
+kind
+ +string + +
+

Kind of the referent, valid values are (‘HelmRepository’, ‘GitRepository’, +‘Bucket’).

+
+name
+ +string + +
+

Name of the referent.

+
+
+
+

OCILayerSelector +

+

+(Appears on: +OCIRepositorySpec, +OCIRepositoryStatus) +

+

OCILayerSelector specifies which layer should be extracted from an OCI Artifact

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mediaType
+ +string + +
+(Optional) +

MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

+
+operation
+ +string + +
+(Optional) +

Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

+
+
+
+

OCIRepositoryRef +

+

+(Appears on: +OCIRepositorySpec) +

+

OCIRepositoryRef defines the image reference for the OCIRepository’s URL

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+digest
+ +string + +
+(Optional) +

Digest is the image digest to pull, takes precedence over SemVer. +The value should be in the format ‘sha256:&lt;digest&gt;’.

+
+semver
+ +string + +
+(Optional) +

SemVer is the range of tags to pull selecting the latest within +the range, takes precedence over Tag.

+
+semverFilter
+ +string + +
+(Optional) +

SemverFilter is a regex pattern to filter the tags within the SemVer range.

+
+tag
+ +string + +
+(Optional) +

Tag is the image tag to pull, defaults to latest.

+
+
+
+

OCIRepositorySpec +

+

+(Appears on: +OCIRepository) +

+

OCIRepositorySpec defines the desired state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+
+

OCIRepositoryStatus +

+

+(Appears on: +OCIRepository) +

+

OCIRepositoryStatus defines the observed state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the OCIRepository.

+
+url
+ +string + +
+(Optional) +

URL is the download link for the artifact output of the last OCI Repository sync.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful OCI Repository sync.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedLayerSelector
+ + +OCILayerSelector + + +
+(Optional) +

ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

OCIRepositoryVerification +

+

+(Appears on: +HelmChartSpec, +OCIRepositorySpec) +

+

OCIRepositoryVerification verifies the authenticity of an OCI Artifact

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider specifies the technology used to sign the OCI Artifact.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Kubernetes Secret containing the +trusted public keys.

+
+matchOIDCIdentity
+ + +[]OIDCIdentityMatch + + +
+(Optional) +

MatchOIDCIdentity specifies the identity matching criteria to use +while verifying an OCI artifact which was signed using Cosign keyless +signing. The artifact’s identity is deemed to be verified if any of the +specified matchers match against the identity.

+
+
+
+

OIDCIdentityMatch +

+

+(Appears on: +OCIRepositoryVerification) +

+

OIDCIdentityMatch specifies options for verifying the certificate identity, +i.e. the issuer and the subject of the certificate.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+issuer
+ +string + +
+

Issuer specifies the regex pattern to match against to verify +the OIDC issuer in the Fulcio certificate. The pattern must be a +valid Go regular expression.

+
+subject
+ +string + +
+

Subject specifies the regex pattern to match against to verify +the identity subject in the Fulcio certificate. The pattern must +be a valid Go regular expression.

+
+
+
+

Source +

+

Source interface must be supported by all API types. +Source is the interface that provides generic access to the Artifact and +interval. It must be supported by all kinds of the source.toolkit.fluxcd.io +API group.

+
+

This page was automatically generated with gen-crd-api-reference-docs

+
diff --git a/docs/api/source.md b/docs/api/v1beta2/source.md similarity index 66% rename from docs/api/source.md rename to docs/api/v1beta2/source.md index 09f072743..8234f7014 100644 --- a/docs/api/source.md +++ b/docs/api/v1beta2/source.md @@ -1,4 +1,4 @@ -

Source API reference

+

Source API reference v1beta2

Packages:

  • @@ -114,6 +114,23 @@ string +sts
    + + +BucketSTSSpec + + + + +(Optional) +

    STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

    +

    This field is only supported for the aws and generic providers.

    + + + + insecure
    bool @@ -138,9 +155,21 @@ string +prefix
    + +string + + + +(Optional) +

    Prefix to use for server-side filtering of files in the Bucket.

    + + + + secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -153,22 +182,65 @@ for the Bucket.

    +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    This field is only supported for the generic provider.

    + + + + +proxySecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

    + + + + interval
    - + Kubernetes meta/v1.Duration -

    Interval at which to check the Endpoint for updates.

    +

    Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    timeout
    - + Kubernetes meta/v1.Duration @@ -209,7 +281,7 @@ Bucket.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -312,7 +384,7 @@ string secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -322,7 +394,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference

    SecretRef specifies the Secret containing authentication credentials for the GitRepository. For HTTPS repositories the Secret must contain ‘username’ and ‘password’ -fields. +fields for basic auth or ‘bearerToken’ field for token auth. For SSH repositories the Secret must contain ‘identity’ and ‘known_hosts’ fields.

    @@ -331,7 +403,7 @@ and ‘known_hosts’ fields.

    interval
    - + Kubernetes meta/v1.Duration @@ -344,7 +416,7 @@ Kubernetes meta/v1.Duration timeout
    - + Kubernetes meta/v1.Duration @@ -421,7 +493,9 @@ string (Optional)

    GitImplementation specifies which Git client library implementation to -use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).

    +use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’). +Deprecated: gitImplementation is deprecated now that ‘go-git’ is the +only supported implementation.

    @@ -434,8 +508,7 @@ bool (Optional)

    RecurseSubmodules enables the initialization of all submodules within -the GitRepository as cloned from the URL, using their default settings. -This option is available only when using the ‘go-git’ GitImplementation.

    +the GitRepository as cloned from the URL, using their default settings.

    @@ -456,7 +529,7 @@ should be included in the Artifact produced for this GitRepository.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -586,13 +659,15 @@ LocalHelmChartSourceReference interval
    - + Kubernetes meta/v1.Duration -

    Interval is the interval at which to check the Source for updates.

    +

    Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    @@ -643,6 +718,19 @@ is merged before the ValuesFiles items. Ignored when omitted.

    +ignoreMissingValuesFiles
    + +bool + + + +(Optional) +

    IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

    + + + + suspend
    bool @@ -658,7 +746,7 @@ source.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -670,6 +758,24 @@ references to this object. NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

    + + +verify
    + + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + + + +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

    + + @@ -762,7 +868,7 @@ host.

    secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -773,8 +879,35 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference for the HelmRepository. For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ fields. -For TLS the secret must contain a ‘certFile’ and ‘keyFile’, and/or -‘caCert’ fields.

    +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

    + + + + +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

    @@ -798,27 +931,46 @@ in credentials getting stolen in a MITM-attack.

    interval
    - + Kubernetes meta/v1.Duration -

    Interval at which to check the URL for updates.

    +(Optional) +

    Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    + + + + +insecure
    + +bool + + + +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

    timeout
    - + Kubernetes meta/v1.Duration (Optional) -

    Timeout of the index fetch operation, defaults to 60s.

    +

    Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

    @@ -838,7 +990,7 @@ HelmRepository.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -863,6 +1015,20 @@ string When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

    + + +provider
    + +string + + + +(Optional) +

    Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

    + + @@ -968,6 +1134,21 @@ defaults to the latest tag.

    +layerSelector
    + + +OCILayerSelector + + + + +(Optional) +

    LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

    + + + + provider
    string @@ -983,7 +1164,7 @@ When not specified, defaults to ‘generic’.

    secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -997,6 +1178,22 @@ The secret must be of type kubernetes.io/dockerconfigjson.

    +verify
    + + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + + + +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

    + + + + serviceAccountName
    string @@ -1013,44 +1210,64 @@ the image pull if the service account has attached pull secrets. For more inform certSecretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional) -

    CertSecretRef can be given the name of a secret containing +

    CertSecretRef can be given the name of a Secret containing either or both of

      -
    • a PEM-encoded client certificate (certFile) and private -key (keyFile);
    • -
    • a PEM-encoded CA certificate (caFile)
    • +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)

    and whichever are supplied, will be used for connecting to the registry. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if -you are using a self-signed server certificate.

    +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    Note: Support for the caFile, certFile and keyFile keys have +been deprecated.

    + + + + +proxySecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

    interval
    - + Kubernetes meta/v1.Duration -

    The interval at which to check for image updates.

    +

    Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    timeout
    - + Kubernetes meta/v1.Duration @@ -1076,6 +1293,18 @@ consult the documentation for your version to find out what those are.

    +insecure
    + +bool + + + +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry.

    + + + + suspend
    bool @@ -1107,15 +1336,9 @@ OCIRepositoryStatus

    Artifact

    -

    -(Appears on: -BucketStatus, -GitRepositoryStatus, -HelmChartStatus, -HelmRepositoryStatus, -OCIRepositoryStatus) -

    Artifact represents the output of a Source reconciliation.

    +

    Deprecated: use Artifact from api/v1 instead. This type will be removed in +a future release.

    @@ -1174,7 +1397,20 @@ string + + + + @@ -1219,6 +1455,94 @@ map[string]string
    (Optional) -

    Checksum is the SHA256 checksum of the Artifact file.

    +

    Checksum is the SHA256 checksum of the Artifact file. +Deprecated: use Artifact.Digest instead.

    +
    +digest
    + +string + +
    +(Optional) +

Digest is the digest of the file in the form of ‘&lt;algorithm&gt;:&lt;checksum&gt;’.

    +

    BucketSTSSpec +

    +

    +(Appears on: +BucketSpec) +

    +

    BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    +provider
    + +string + +
    +

    Provider of the Security Token Service.

    +
    +endpoint
    + +string + +
    +

    Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

    +
    +secretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

    +
    +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    This field is only supported for the ldap provider.

    +
    +
    +

    BucketSpec

    @@ -1275,6 +1599,23 @@ string +sts
    + + +BucketSTSSpec + + + + +(Optional) +

    STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

    +

    This field is only supported for the aws and generic providers.

    + + + + insecure
    bool @@ -1299,9 +1640,21 @@ string +prefix
    + +string + + + +(Optional) +

    Prefix to use for server-side filtering of files in the Bucket.

    + + + + secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -1314,22 +1667,65 @@ for the Bucket.

    +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    This field is only supported for the generic provider.

    + + + + +proxySecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

    + + + + interval
    - + Kubernetes meta/v1.Duration -

    Interval at which to check the Endpoint for updates.

    +

    Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    timeout
    - + Kubernetes meta/v1.Duration @@ -1370,7 +1766,7 @@ Bucket.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -1419,7 +1815,7 @@ int64 conditions
    - + []Kubernetes meta/v1.Condition @@ -1447,8 +1843,8 @@ BucketStatus.Artifact data is recommended.

    artifact
    - -Artifact + +github.com/fluxcd/source-controller/api/v1.Artifact @@ -1459,9 +1855,22 @@ Artifact +observedIgnore
    + +string + + + +(Optional) +

    ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

    + + + + ReconcileRequestStatus
    - + github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus @@ -1480,7 +1889,8 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus

    (Appears on: -GitRepositorySpec) +GitRepositorySpec, +GitRepositoryStatus)

    GitRepositoryInclude specifies a local reference to a GitRepository which Artifact (sub-)contents must be included, and where they should be placed.

    @@ -1498,7 +1908,7 @@ Artifact (sub-)contents must be included, and where they should be placed.

    repository
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -1565,8 +1975,6 @@ string (Optional)

    Branch to check out, defaults to ‘master’ if no other field is defined.

    -

    When GitRepositorySpec.GitImplementation is set to ‘go-git’, a shallow -clone of the specified branch is performed.

    @@ -1595,6 +2003,20 @@ string +name
    + +string + + + +(Optional) +

    Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

    +

    It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description +Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

    + + + + commit
    string @@ -1603,9 +2025,8 @@ string (Optional)

    Commit SHA to check out, takes precedence over all reference fields.

    -

    When GitRepositorySpec.GitImplementation is set to ‘go-git’, this can be -combined with Branch to shallow clone the branch, in which the commit is -expected to exist.

    +

    This can be combined with Branch to shallow clone the branch, in which +the commit is expected to exist.

    @@ -1645,7 +2066,7 @@ string secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -1655,7 +2076,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference

    SecretRef specifies the Secret containing authentication credentials for the GitRepository. For HTTPS repositories the Secret must contain ‘username’ and ‘password’ -fields. +fields for basic auth or ‘bearerToken’ field for token auth. For SSH repositories the Secret must contain ‘identity’ and ‘known_hosts’ fields.

    @@ -1664,7 +2085,7 @@ and ‘known_hosts’ fields.

    interval
    - + Kubernetes meta/v1.Duration @@ -1677,7 +2098,7 @@ Kubernetes meta/v1.Duration timeout
    - + Kubernetes meta/v1.Duration @@ -1754,7 +2175,9 @@ string (Optional)

    GitImplementation specifies which Git client library implementation to -use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).

    +use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’). +Deprecated: gitImplementation is deprecated now that ‘go-git’ is the +only supported implementation.

    @@ -1767,8 +2190,7 @@ bool (Optional)

    RecurseSubmodules enables the initialization of all submodules within -the GitRepository as cloned from the URL, using their default settings. -This option is available only when using the ‘go-git’ GitImplementation.

    +the GitRepository as cloned from the URL, using their default settings.

    @@ -1789,7 +2211,7 @@ should be included in the Artifact produced for this GitRepository.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -1839,7 +2261,7 @@ object.

    conditions
    - + []Kubernetes meta/v1.Condition @@ -1867,8 +2289,8 @@ GitRepositoryStatus.Artifact data is recommended.

    artifact
    - -Artifact + +github.com/fluxcd/source-controller/api/v1.Artifact @@ -1881,8 +2303,8 @@ Artifact includedArtifacts
    - -[]Artifact + +[]github.com/fluxcd/source-controller/api/v1.Artifact @@ -1910,13 +2332,56 @@ observed in .status.observedGeneration version of the object. This can be used to determine if the content of the included repository has changed. It has the format of <algo>:<checksum>, for example: sha256:<checksum>.

    +

    Deprecated: Replaced with explicit fields for observed artifact content +config in the status.

    + + + + +observedIgnore
    + +string + + + +(Optional) +

    ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

    + + + + +observedRecurseSubmodules
    + +bool + + + +(Optional) +

    ObservedRecurseSubmodules is the observed resource submodules +configuration used to produce the current Artifact.

    + + + + +observedInclude
    + + +[]GitRepositoryInclude + + + + +(Optional) +

    ObservedInclude is the observed list of GitRepository resources used to +to produce the current Artifact.

    ReconcileRequestStatus
    - + github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus @@ -1964,7 +2429,7 @@ string secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -2037,13 +2502,15 @@ LocalHelmChartSourceReference interval
    - + Kubernetes meta/v1.Duration -

    Interval is the interval at which to check the Source for updates.

    +

    Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    @@ -2094,6 +2561,19 @@ is merged before the ValuesFiles items. Ignored when omitted.

    +ignoreMissingValuesFiles
    + +bool + + + +(Optional) +

    IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

    + + + + suspend
    bool @@ -2109,7 +2589,7 @@ source.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -2121,6 +2601,24 @@ references to this object. NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

    + + +verify
    + + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + + + +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

    + + @@ -2183,9 +2681,23 @@ resolved chart reference.

    +observedValuesFiles
    + +[]string + + + +(Optional) +

    ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

    + + + + conditions
    - + []Kubernetes meta/v1.Condition @@ -2213,8 +2725,8 @@ BucketStatus.Artifact data is recommended.

    artifact
    - -Artifact + +github.com/fluxcd/source-controller/api/v1.Artifact @@ -2227,7 +2739,7 @@ Artifact ReconcileRequestStatus
    - + github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus @@ -2276,7 +2788,7 @@ host.

    secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -2287,8 +2799,35 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference for the HelmRepository. For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ fields. -For TLS the secret must contain a ‘certFile’ and ‘keyFile’, and/or -‘caCert’ fields.

    +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

    + + + + +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

    @@ -2312,27 +2851,46 @@ in credentials getting stolen in a MITM-attack.

    interval
    - + Kubernetes meta/v1.Duration -

    Interval at which to check the URL for updates.

    +(Optional) +

    Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    + + + + +insecure
    + +bool + + + +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

    timeout
    - + Kubernetes meta/v1.Duration (Optional) -

    Timeout of the index fetch operation, defaults to 60s.

    +

    Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

    @@ -2352,7 +2910,7 @@ HelmRepository.

    accessFrom
    - + github.com/fluxcd/pkg/apis/acl.AccessFrom @@ -2377,6 +2935,20 @@ string When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

    + + +provider
    + +string + + + +(Optional) +

    Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

    + + @@ -2415,7 +2987,7 @@ object.

    conditions
    - + []Kubernetes meta/v1.Condition @@ -2443,8 +3015,8 @@ HelmRepositoryStatus.Artifact data is recommended.

    artifact
    - -Artifact + +github.com/fluxcd/source-controller/api/v1.Artifact @@ -2457,7 +3029,7 @@ Artifact ReconcileRequestStatus
    - + github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus @@ -2529,6 +3101,57 @@ string +

    OCILayerSelector +

    +

    +(Appears on: +OCIRepositorySpec, +OCIRepositoryStatus) +

    +

    OCILayerSelector specifies which layer should be extracted from an OCI Artifact.

    +
    +
    + + + + + + + + + + + + + + + + + +
    FieldDescription
    +mediaType
    + +string + +
    +(Optional) +

    MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

    +
    +operation
    + +string + +
    +(Optional) +

    Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

    +
    +
    +

    OCIRepositoryRef

    @@ -2574,6 +3197,18 @@ the range, takes precedence over Tag.

    +semverFilter
    + +string + + + +(Optional) +

    SemverFilter is a regex pattern to filter the tags within the SemVer range.

    + + + + tag
    string @@ -2634,6 +3269,21 @@ defaults to the latest tag.

    +layerSelector
    + + +OCILayerSelector + + + + +(Optional) +

    LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

    + + + + provider
    string @@ -2649,7 +3299,7 @@ When not specified, defaults to ‘generic’.

    secretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -2663,6 +3313,22 @@ The secret must be of type kubernetes.io/dockerconfigjson.

    +verify
    + + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + + + +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether the OCI image is authentic.

    + + + + serviceAccountName
    string @@ -2679,44 +3345,64 @@ the image pull if the service account has attached pull secrets. For more inform certSecretRef
    - + github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional) -

    CertSecretRef can be given the name of a secret containing +

    CertSecretRef can be given the name of a Secret containing either or both of

      -
    • a PEM-encoded client certificate (certFile) and private -key (keyFile);
    • -
    • a PEM-encoded CA certificate (caFile)
    • +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)

    and whichever are supplied, will be used for connecting to the registry. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if -you are using a self-signed server certificate.

    +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +

    Note: Support for the caFile, certFile and keyFile keys has +been deprecated.

    + + + + +proxySecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

    interval
    - + Kubernetes meta/v1.Duration -

    The interval at which to check for image updates.

    +

    Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    timeout
    - + Kubernetes meta/v1.Duration @@ -2742,6 +3428,18 @@ consult the documentation for your version to find out what those are.

    +insecure
    + +bool + + + +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry.

    + + + + suspend
    bool @@ -2789,7 +3487,7 @@ int64 conditions
    - + []Kubernetes meta/v1.Condition @@ -2815,8 +3513,8 @@ string artifact
    - -Artifact + +github.com/fluxcd/source-controller/api/v1.Artifact @@ -2827,59 +3525,66 @@ Artifact -ReconcileRequestStatus
    +contentConfigChecksum
    - -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus - +string -

    -(Members of ReconcileRequestStatus are embedded into this type.) -

    +(Optional) +

    ContentConfigChecksum is a checksum of all the configurations related to +the content of the source artifact: +- .spec.ignore +- .spec.layerSelector +observed in .status.observedGeneration version of the object. This can +be used to determine if the content configuration has changed and the +artifact needs to be rebuilt. +It has the format of <algo>:<checksum>, for example: sha256:<checksum>.

    +

    Deprecated: Replaced with explicit fields for observed artifact content +config in the status.

    - - - - -

    OCIRepositoryVerification -

    -

    OCIRepositoryVerification verifies the authenticity of an OCI Artifact.

    -
    -
    - - - - + + - - @@ -2892,6 +3597,8 @@ trusted public keys.

    Source is the interface that provides generic access to the Artifact and interval. It must be supported by all kinds of the source.toolkit.fluxcd.io API group.

    +

    Deprecated: use the Source interface from api/v1 instead. This type will be +removed in a future release.

    This page was automatically generated with gen-crd-api-reference-docs

    diff --git a/docs/spec/README.md b/docs/spec/README.md index 4af0eb04b..ed8cd38f3 100644 --- a/docs/spec/README.md +++ b/docs/spec/README.md @@ -1,66 +1,7 @@ # Source Controller -The main goal is to define a set of Kubernetes objects that cluster -admins and various automated operators can interact with to offload -the sources (e.g. Git and Helm repositories) registration, authentication, -verification and resource fetching to a dedicated controller. - -## Motivation - -Each Flux and each Helm operator mirrors the Git repositories they are -using, in the same way, using the same code. But other components -might benefit from access to the source mirrors, and Flux and the Helm -operator could work more in sympathy with Kubernetes by factoring it out. - -If "sources" (usually git repos, but also Helm charts and potentially -other things) existed in their own right as Kubernetes resources, -components like Flux and Helm operator could use standard Kubernetes -mechanisms to build on them; and, they could be managed independently -of the components using them. - ## API Specification +* [v1](v1/README.md) * [v1beta2](v1beta2/README.md) * [v1beta1](v1beta1/README.md) - -## Implementation - -The controller implementation will watch for source objects in a cluster and act on them. 
-The actions performed by the source controller could be: - -* validate source definitions -* authenticate to sources and validate authenticity -* detect source changes based on update policies (semver) -* fetch resources on-demand and on-a-schedule -* package the fetched resources into a well known format (tar.gz, yaml) -* store the artifacts locally -* make the artifacts addressable by their source identifier (sha, version, ts) -* make the artifacts available in-cluster to interested 3rd parties -* notify interested 3rd parties of source changes and availability (status conditions, events, hooks) - -## Impact to Flux - -Having a dedicated controller that manages Git repositories defined with Kubernetes custom resources would: - -* simplify Flux configuration as fluxd could subscribe to Git sources in-cluster and pull the artifacts -automatically without manual intervention from users to reconfigure and redeploy FLux -* improve the installation experience as users will not have to patch fluxd's deployment to inject -the HTTPS basic auth credentials, change the source URL or other Git and PGP related settings -* enable fluxd to compose the desired state of a cluster from multiple sources by applying all artifacts present in flux namespace -* enable fluxd to apply manifests coming from other sources than Git, e.g. 
S3 buckets -* allow fluxd to run under a non-root user as it wouldn't need to shell out to ssh-keygen, git or pgp -* enable fluxd to apply manifests coming from the most recent semver tag of a Git repository -* allow user to pin the cluster desired state to a specific Git commit or Git tag - -## Impact to Helm Operator - -Having a dedicated controller that manages Helm repositories and charts defined with Kubernetes custom -resources would: - -* simplify the Helm Operator configuration as repository and chart definitions can be re-used across - `HelmRelease` resources (see [fluxcd/helm-operator#142](https://github.com/fluxcd/helm-operator/issues/142)) -* improve the user experience as repositories requiring authentication will no longer require a - `repositories.yaml` import / file mount -* simplify the architecture of the Helm Operator as it allows the operator to work with a single - source type (`HelmChart`) and way of preparing and executing installations and/or upgrades -* allow the Helm Operator to run under a non-root user as it wouldn't need to shell out to git diff --git a/docs/spec/v1/README.md b/docs/spec/v1/README.md new file mode 100644 index 000000000..f08ea805f --- /dev/null +++ b/docs/spec/v1/README.md @@ -0,0 +1,22 @@ +# source.toolkit.fluxcd.io/v1 + +This is the v1 API specification for defining the desired state sources of Kubernetes clusters. 
+ +## Specification + +* Source kinds: + + [GitRepository](gitrepositories.md) + + [OCIRepository](ocirepositories.md) + + [HelmRepository](helmrepositories.md) + + [HelmChart](helmcharts.md) + + [Bucket](buckets.md) + +## Implementation + +* [source-controller](https://github.com/fluxcd/source-controller/) + +## Consumers + +* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/) +* [helm-controller](https://github.com/fluxcd/helm-controller/) +* [source-watcher](https://github.com/fluxcd/source-watcher/) diff --git a/docs/spec/v1/buckets.md b/docs/spec/v1/buckets.md new file mode 100644 index 000000000..077ac952b --- /dev/null +++ b/docs/spec/v1/buckets.md @@ -0,0 +1,1433 @@ +# Buckets + + + +The `Bucket` API defines a Source to produce an Artifact for objects from storage +solutions like Amazon S3, Google Cloud Storage buckets, or any other solution +with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. + +## Example + +The following is an example of a Bucket. It creates a tarball (`.tar.gz`) +Artifact with the fetched objects from an object storage with an S3 +compatible API (e.g. [Minio](https://min.io)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: minio-bucket + namespace: default +spec: + interval: 5m0s + endpoint: minio.example.com + insecure: true + secretRef: + name: minio-bucket-secret + bucketName: example +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-bucket-secret + namespace: default +type: Opaque +stringData: + accesskey: + secretkey: +``` + +In the above example: + +- A Bucket named `minio-bucket` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the object storage bucket every five minutes, + indicated by the `.spec.interval` field. 
+- It authenticates to the `minio.example.com` endpoint with + the static credentials from the `minio-secret` Secret data, indicated by + the `.spec.endpoint` and `.spec.secretRef.name` fields. +- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag) + in the `.spec.bucketName` bucket is compiled, while filtering the keys using + [default ignore rules](#default-exclusions). +- The digest (algorithm defaults to SHA256) of the list is used as Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current Bucket revision differs from the latest calculated revision, + all objects are fetched and archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `bucket.yaml`, and +changing the Bucket and Secret values to target a Minio instance you have +control over. + +**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see +[Provider](#provider). + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f bucket.yaml + ``` + +2. Run `kubectl get buckets` to see the Bucket: + + ```console + NAME ENDPOINT AGE READY STATUS + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ``` + +3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the Bucket's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Last Update Time: 2024-02-01T23:43:38Z + Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Conditions: + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket + ``` + +## Writing a Bucket spec + +As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and +`metadata` fields. The name of a Bucket object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A Bucket also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). 
+ +### Provider + +The `.spec.provider` field allows for specifying a Provider to enable provider +specific configurations, for example to communicate with a non-S3 compatible +API endpoint, or to change the authentication method. + +Supported options are: + +- [Generic](#generic) +- [AWS](#aws) +- [Azure](#azure) +- [GCP](#gcp) + +If you do not specify `.spec.provider`, it defaults to `generic`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Generic + +When a Bucket's `spec.provider` is set to `generic`, the controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go), which can communicate +with any Amazon S3 compatible object storage (including +[GCS](https://cloud.google.com/storage/docs/interoperability), +[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-), +and many others). + +The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a +Secret with `.data.accesskey` and `.data.secretkey` values, used to +authenticate with static credentials. + +The Provider allows for specifying a region the bucket is in using the +[`.spec.region` field](#region), if required by the [Endpoint](#endpoint). 
+ +##### Generic example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: generic-insecure + namespace: default +spec: + provider: generic + interval: 5m0s + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + timeout: 60s + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### AWS + +When a Bucket's `.spec.provider` field is set to `aws`, the source-controller +will attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go). + +Without a [Secret reference](#secret-reference), authorization using +credentials retrieved from the AWS EC2 service is attempted by default. When +a reference is specified, it expects a Secret with `.data.accesskey` and +`.data.secretkey` values, used to authenticate with static credentials. + +The Provider allows for specifying the +[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) +using the [`.spec.region` field](#region). + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/aws/#for-amazon-simple-storage-service + +##### AWS EC2 example + +**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for +the source-controller service account that grants access to the bucket. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS IAM role example + +Replace `` with the specified `.spec.bucketName`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} +``` + +##### AWS static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + secretRef: + name: aws-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: aws-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +##### AWS Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + serviceAccountName: aws-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-workload-identity-sa + namespace: default + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/flux-bucket-role +``` + +#### Azure + +When a Bucket's `.spec.provider` is set to `azure`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Azure Blob Storage SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob). + +Without a [Secret reference](#secret-reference), authentication using a chain +with: + +- [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential) +- [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) + with the `AZURE_CLIENT_ID` +- Managed Identity with a system-assigned identity + +is attempted by default. If no chain can be established, the bucket +is assumed to be publicly reachable. + +When a reference is specified, it expects a Secret with one of the following +sets of `.data` fields: + +- `tenantId`, `clientId` and `clientSecret` for authenticating a Service + Principal with a secret. +- `tenantId`, `clientId` and `clientCertificate` (plus optionally + `clientCertificatePassword` and/or `clientCertificateSendChain`) for + authenticating a Service Principal with a certificate. +- `clientId` for authenticating using a Managed Identity. 
+- `accountKey` for authenticating using a + [Shared Key](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#SharedKeyCredential). +- `sasKey` for authenticating using a [SAS Token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) + +For any Managed Identity and/or Microsoft Entra ID (Formerly Azure Active Directory) authentication method, +the base URL can be configured using `.data.authorityHost`. If not supplied, +[`AzurePublicCloud` is assumed](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AuthorityHost). + +##### Azure example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-public + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: podinfo + endpoint: https://podinfoaccount.blob.core.windows.net + timeout: 30s +``` + +##### Azure Service Principal Secret example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-secret + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientSecret: +``` + +##### Azure Service Principal Certificate example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-cert + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientCertificate: + # Plus optionally + clientCertificatePassword: + clientCertificateSendChain: # either "1" or "true" +``` + +##### Azure Managed 
Identity with Client ID example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-managed-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-smi-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-smi-auth + namespace: default +type: Opaque +data: + clientId: +``` + +##### Azure Blob Shared Key example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-shared-key + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + accountKey: +``` + +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net +``` + +##### Azure Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net + serviceAccountName: azure-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-workload-identity-sa + namespace: default + annotations: + azure.workload.identity/client-id: + azure.workload.identity/tenant-id: +``` + +##### Azure Blob SAS Token example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-sas-token + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + sasKey: +``` + +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. 
+ +The minimum permissions for an account-level SAS token are: + +- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. + +#### GCP + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/gcp/#for-google-cloud-storage + +##### GCP Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + serviceAccountName: gcp-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: gcp-workload-identity-sa + namespace: default + annotations: + iam.gke.io/gcp-service-account: +``` + +##### GCP static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-secret + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: + endpoint: storage.googleapis.com + region: + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: default +type: Opaque +data: + serviceaccount: +``` + +Where the (base64 decoded) value of `.data.serviceaccount` looks like this: + +```json +{ + "type": "service_account", + "project_id": "example", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@example.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" +} +``` + +### Interval + +`.spec.interval` is a required field that specifies the interval which the +object storage bucket must be consulted at. + +After successfully reconciling a Bucket object, the source-controller requeues +the object for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the object storage bucket every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. the apply of a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Endpoint + +`.spec.endpoint` is a required field that specifies the HTTP/S object storage +endpoint to connect to and fetch objects from. Connecting to an (insecure) +HTTP endpoint requires enabling [`.spec.insecure`](#insecure). + +Some endpoints require the specification of a [`.spec.region`](#region), +see [Provider](#provider) for more (provider specific) examples. + +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. By adding this field, one may specify the +STS endpoint from where temporary credentials will be fetched. + +This field is only supported for the `aws` and `generic` bucket [providers](#provider). 
+ +If using `.spec.sts`, the following fields are required: + +- `.spec.sts.provider`, the Security Token Service provider. The only supported + option for the `generic` bucket provider is `ldap`. The only supported option + for the `aws` bucket provider is `aws`. +- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In + the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS + Endpoint, or an Interface Endpoint created inside a VPC. In the case of + `ldap` this must be the LDAP server endpoint. + +When using the `ldap` provider, the following fields may also be specified: + +- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP + credentials. The Secret must contain the following keys: + - `username`, the username to authenticate with. + - `password`, the password to authenticate with. +- `.spec.sts.certSecretRef.name`, the name of the Secret containing the + TLS configuration for communicating with the STS endpoint. The contents + of this Secret must follow the same structure of + [`.spec.certSecretRef.name`](#cert-secret-reference). + +If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified, +the proxy configuration will be used for commucating with the STS endpoint. 
+ +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Bucket name + +`.spec.bucketName` is a required field that specifies which object storage +bucket on the [Endpoint](#endpoint) objects should be fetched from. + +See [Provider](#provider) for more (provider specific) examples. + +### Region + +`.spec.region` is an optional field to specify the region a +[`.spec.bucketName`](#bucket-name) is located in. + +See [Provider](#provider) for more (provider specific) examples. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a bucket using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have
+three files: `client.key`, `client.crt` and `ca.crt` for the client private key,
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +[endpoint](#endpoint), if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for object storage +fetch operations. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. +The default value is `60s`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the Bucket, containing authentication +credentials for the object storage. For some `.spec.provider` implementations +the presence of the field is required, see [Provider](#provider) for more +details and examples. + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as Bucket with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. 
+ +**Note:** that for a publicly accessible object storage, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +**Important:** `.spec.secretRef` and `.spec.serviceAccountName` are mutually +exclusive and cannot be set at the same time. This constraint is enforced +at the CRD level. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [provider](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage +objects which keys match the defined rules are excluded while fetching. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket. +When set to `true`, the controller will stop reconciling the Bucket, and changes +to the resource or in the object storage bucket will not result in a new +Artifact. When the field is set to `false` or removed, it will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with Buckets + +### Excluding files + +By default, storage bucket objects which match the [default exclusion +rules](#default-exclusions) are excluded while fetching. It is possible to +overwrite and/or overrule the default exclusions using a file in the bucket +and/or an in-spec set of rules. 
+ +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the root of the +object storage bucket. The `.sourceignore` file follows [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +#### Ignore spec + +Another option is to define the exclusions within the Bucket spec, using the +[`.spec.ignore` field](#ignore). Specified rules override the +[default exclusion list](#default-exclusions), and may overrule `.sourceignore` +file exclusions. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a Bucket outside the +[specified interval window](#interval), a Bucket can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the Bucket for reconciliation if the `` differs from +the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite bucket/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source bucket +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the Bucket to reach a +[ready state](#ready-bucket) using `kubectl`: + +```sh +kubectl wait bucket/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a Bucket, you can suspend it using the [`.spec.suspend` +field](#suspend). 
+ +#### Suspend a Bucket + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source bucket +``` + +**Note:** When a Bucket has an Artifact and is suspended, and this Artifact +later disappears from the storage due to e.g. the source-controller Pod being +evicted from a Node, this will not be reflected in the Bucket's Status until it +is resumed. + +#### Resume a Bucket + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source bucket +``` + +### Debugging a Bucket + +There are several ways to gather information about a Bucket for debugging +purposes. + +#### Describe the Bucket + +Describing a Bucket using `kubectl describe bucket ` displays the +latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2024-02-02T13:26:55Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: False + Type: Ready + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning BucketOperationFailed 37s (x11 over 42s) source-controller bucket 'my-new-bucket' does not exist +``` + +#### Trace emitted Events + +To view events for specific Bucket(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for Bucket/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m30s Normal NewArtifact bucket/ fetched 16 files with revision from 'my-new-bucket' +36s Normal ArtifactUpToDate bucket/ artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +18s Warning BucketOperationFailed bucket/ bucket 'my-new-bucket' does not exist +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=`. + +## Bucket Status + +### Artifact + +The Bucket reports the latest synchronized state from the object storage +bucket as an Artifact object in the `.status.artifact` of the resource. 
+ +The Artifact file is a gzip compressed TAR archive +(`.tar.gz`), and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +status: + artifact: + digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a + lastUpdateTime: "2024-01-28T10:30:30Z" + path: bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + size: 38099 + url: http://source-controller..svc.cluster.local./bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A Bucket enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-bucket) while fetching storage objects, +it can be [ready](#ready-bucket), or it can [fail during +reconciliation](#failed-bucket). + +The Bucket API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the Bucket to become +`Ready`. 
+ +#### Reconciling Bucket + +The source-controller marks a Bucket as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the Bucket, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the Bucket is newer than the [Observed Generation](#observed-generation). +- The newly calculated Artifact revision differs from the current Artifact. + +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the Bucket while their status value is `"True"`. + +#### Ready Bucket + +The source-controller marks a Bucket as _ready_ when it has the following +characteristics: + +- The Bucket reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The Bucket was able to communicate with the Bucket's object storage endpoint + using the current spec. +- The revision of the reported Artifact is up-to-date with the latest + calculated revision of the object storage bucket. + +When the Bucket is "ready", the controller sets a Condition with the following +attributes in the Bucket's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the Bucket +is marked as [reconciling](#reconciling-bucket), or e.g. a +[transient error](#failed-bucket) occurs due to a temporary network issue. 
+ +When the Bucket Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +Bucket's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed Bucket + +The source-controller may get stuck trying to produce an Artifact for a Bucket +without completing. This can occur due to some of the following factors: + +- The object storage [Endpoint](#endpoint) is temporarily unavailable. +- The specified object storage bucket does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The Bucket spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the Bucket's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: BucketOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the Bucket while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the Bucket has this Condition, the controller will continue to attempt +to produce an Artifact for the resource with an exponential backoff, until +it succeeds and the Bucket is marked as [ready](#ready-bucket). + +Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at +the same time, for example due to a newly introduced configuration issue in the +Bucket spec. 
When a reconciliation fails, the `Reconciling` Condition reason +would be `ProgressingWithRetry`. When the reconciliation is performed again +after the failure, the reason is updated to `Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the Bucket's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-bucket), or stalled due to error +it can not recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Generation + +The source-controller reports an +[observed generation][typical-status-properties] +in the Bucket's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/externalartifacts.md b/docs/spec/v1/externalartifacts.md new file mode 100644 index 000000000..1eccbe0e0 --- /dev/null +++ b/docs/spec/v1/externalartifacts.md @@ -0,0 +1,114 @@ +# External Artifacts + + + +The `ExternalArtifact` is a generic API designed for interoperability with Flux. 
+It allows 3rd party controllers to produce and store [Artifact](#artifact) objects +in the same way as Flux's own source-controller. +For more details on the design and motivation behind this API, +see [RFC-0012](https://github.com/fluxcd/flux2/tree/main/rfcs/0012-external-artifact). + +## Example + +The following is an example of a ExternalArtifact produced by a 3rd party +source controller: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: ExternalArtifact +metadata: + name: my-artifact + namespace: flux-system +spec: + sourceRef: + apiVersion: example.com/v1 + kind: Source + name: my-source +status: + artifact: + digest: sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + lastUpdateTime: "2025-08-21T13:37:31Z" + path: source/flux-system/my-source/35d47c9d.tar.gz + revision: v1.0.0@sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + size: 20914 + url: http://example-controller.flux-system.svc.cluster.local./source/flux-system/my-source/35d47c9d.tar.gz + conditions: + - lastTransitionTime: "2025-08-21T13:37:31Z" + message: stored artifact for revision v1.0.0 + observedGeneration: 1 + reason: Succeeded + status: "True" + type: Ready +``` + +## ExternalArtifact spec + +### Source reference + +The `spec.sourceRef` field is optional and contains a reference +to the custom resource that the ExternalArtifact is based on. + +The `spec.sourceRef` contains the following fields: + +- `apiVersion`: the API version of the custom resource. +- `kind`: the kind of the custom resource. +- `name`: the name of the custom resource. +- `namespace`: the namespace of the custom resource. If omitted, it defaults to the + namespace of the ExternalArtifact. + +## ExternalArtifact status + +### Artifact + +The ExternalArtifact reports the latest synchronized state +as an Artifact object in the `.status.artifact`. 
+ +The `.status.artifact` contains the following fields: + +- `digest`: The checksum of the tar.gz file in the format `:`. +- `lastUpdateTime`: Timestamp of the last artifact update. +- `path`: Relative file path of the artifact in storage. +- `revision`: Human-readable identifier with version and checksum in the format `@:`. +- `size`: Number of bytes in the tar.gz file. +- `url`: In-cluster HTTP address for artifact retrieval. + +### Conditions + +The ExternalArtifact reports its status using Kubernetes standard conditions. + +#### Ready ExternalArtifact + +When the 3rd party controller has successfully produced and stored an +Artifact in storage, it sets a Condition with the following +attributes in the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful storage of the artifact and the associated revision. + +If the 3rd party controller performs a signature verification +of the artifact, and the verification is successful, a Condition with the +following attributes is added to the ExternalArtifact's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful verification of the artifact and the associated verification method. + +#### Failed ExternalArtifact + +If the 3rd party controller fails to produce and store an Artifact, +it sets the `Ready` Condition status to `False`, and adds a Condition with +the following attributes to the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "False"` +- `reason: FetchFailed` | `reason: StorageOperationFailed` | `reason: VerificationFailed` + +The `message` field should contain a human-readable message indicating +the reason for the failure. 
diff --git a/docs/spec/v1/gitrepositories.md b/docs/spec/v1/gitrepositories.md new file mode 100644 index 000000000..d39ee73d3 --- /dev/null +++ b/docs/spec/v1/gitrepositories.md @@ -0,0 +1,1259 @@ +# Git Repositories + + + +The `GitRepository` API defines a Source to produce an Artifact for a Git +repository revision. + +## Example + +The following is an example of a GitRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from a Git repository for the +resolved reference. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://github.com/stefanprodan/podinfo + ref: + branch: master +``` + +In the above example: + +- A GitRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the Git repository every five minutes, indicated + by the `.spec.interval` field. +- It clones the `master` branch of the `https://github.com/stefanprodan/podinfo` + repository, indicated by the `.spec.ref.branch` and `.spec.url` fields. +- The specified branch and resolved HEAD revision are used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current GitRepository revision differs from the latest fetched + revision, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `gitrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f gitrepository.yaml + ``` + +2. Run `kubectl get gitrepository` to see the GitRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + ``` + +3. 
Run `kubectl describe gitrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the GitRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2 + Last Update Time: 2022-02-14T11:23:36Z + Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Revision: master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc + Size: 91318 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Conditions: + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' + ``` + +## Writing a GitRepository spec + +As with all other Kubernetes config, a GitRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a GitRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A GitRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### URL + +`.spec.url` is a required field that specifies the HTTP/S or SSH address of the +Git repository. 
+ +**Note:** Unlike using `git`, the +[shorter scp-like syntax](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol) +is not supported for SSH addresses (e.g. `user@example.com:repository.git`). +Instead, the valid URL format is `ssh://user@example.com:22/repository.git`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the GitRepository, containing authentication +credentials for the Git repository. + +The required fields in the Secret depend on the specified protocol in the +[URL](#url). + +#### Basic access authentication + +To authenticate towards a Git repository over HTTPS using basic access +authentication (in other words: using a username and password), the referenced +Secret is expected to contain `.data.username` and `.data.password` values. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: basic-access-auth +type: Opaque +data: + username: + password: +``` + +#### Bearer token authentication + +To authenticate towards a Git repository over HTTPS using bearer token +authentication (in other words: using a `Authorization: Bearer` header), the referenced +Secret is expected to contain the token in `.data.bearerToken`. + +**Note:** If you are looking to use OAuth tokens with popular servers (e.g. +[GitHub](https://docs.github.com/en/rest/overview/authenticating-to-the-rest-api?apiVersion=2022-11-28#authenticating-with-a-token-generated-by-an-app), +[Bitbucket](https://support.atlassian.com/bitbucket-cloud/docs/using-access-tokens/), +[GitLab](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#clone-using-a-token)), +you should use basic access authentication instead. These servers use basic HTTP +authentication, with the OAuth token as the password. Check the documentation of +your Git server for details. 
+ +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: bearer-token-auth +type: Opaque +data: + bearerToken: +``` + +#### HTTPS Certificate Authority + +To provide a Certificate Authority to trust while connecting with a Git +repository over HTTPS, the referenced Secret's `.data` can contain a `ca.crt` +or `caFile` key. `ca.crt` takes precedence over `caFile`, i.e. if both keys +are present, the value of `ca.crt` will be taken into consideration. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-ca-credentials + namespace: default +type: Opaque +data: + ca.crt: +``` + +#### HTTPS Mutual TLS authentication + +To authenticate towards a Git repository over HTTPS using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used + for TLS client authentication. These must be used in conjunction, i.e. + specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is + required if the server is using a self-signed certificate. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-tls-certs + namespace: default +type: Opaque +data: + tls.crt: + tls.key: + ca.crt: +``` + +#### SSH authentication + +To authenticate towards a Git repository over SSH, the referenced Secret is +expected to contain `identity` and `known_hosts` fields. With the respective +private key of the SSH key pair, and the host keys of the Git repository. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssh-credentials +type: Opaque +stringData: + identity: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- + known_hosts: | + github.com ecdsa-sha2-nistp256 AAAA... 
+``` + +Alternatively, the Flux CLI can be used to automatically create the +secret, and also populate the known_hosts: + +```sh +flux create secret git podinfo-auth \ + --url=ssh://git@github.com/stefanprodan/podinfo \ + --private-key-file=./identity +``` + +For password-protected SSH private keys, the password must be provided +via an additional `password` field in the secret. Flux CLI also supports +this via the `--password` flag. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider +used for authentication purposes. + +Supported options are: + +- `generic` +- `azure` +- `github` + +When provider is not specified, it defaults to `generic` indicating that +mechanisms using `spec.secretRef` are used for authentication. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Azure + +The `azure` provider can be used to authenticate to Azure DevOps repositories +automatically using Workload Identity. + +##### Pre-requisites + +- Ensure that your Azure DevOps Organization is + [connected](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/connect-organization-to-azure-ad?view=azure-devops) + to Microsoft Entra. +- Ensure Workload Identity is properly [set up on your + cluster](https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster#create-an-aks-cluster). + +##### Configure Flux controller + +- Create a managed identity to access Azure DevOps. Establish a federated + identity credential between the managed identity and the source-controller + service account. In the default installation, the source-controller service + account is located in the `flux-system` namespace with name + `source-controller`. Ensure the federated credential uses the correct + namespace and name of the source-controller service account. 
For more details, + please refer to this + [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +- Add the managed identity to the Azure DevOps organization as a user. Ensure + that the managed identity has the necessary permissions to access the Azure + DevOps repository as described + [here](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/service-principal-managed-identity?view=azure-devops#2-add-and-manage-service-principals-in-an-azure-devops-organization). + +- Add the following patch to your bootstrap repository in + `flux-system/kustomization.yaml` file: + + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +**Note:** When azure `provider` is used with `GitRepository`, the `.spec.url` +must follow this format: + +``` +https://dev.azure.com/{your-organization}/{your-project}/_git/{your-repository} +``` +#### GitHub + +The `github` provider can be used to authenticate to Git repositories using +[GitHub Apps](https://docs.github.com/en/apps/overview). 
+ +##### Pre-requisites + +- [Register](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) + the GitHub App with the necessary permissions and [generate a private + key](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/managing-private-keys-for-github-apps) + for the app. + +- [Install](https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app) + the app in the organization/account configuring access to the necessary + repositories. + +##### Configure GitHub App secret + +The GitHub App information is specified in `.spec.secretRef` in the format +specified below: + +- Get the App ID from the app settings page at + `https://github.com/settings/apps/`. +- Get the App Installation ID from the app installations page at +`https://github.com/settings/installations`. Click the installed app, the URL +will contain the installation ID +`https://github.com/settings/installations/`. For +organizations, the first part of the URL may be different, but it follows the +same pattern. +- The private key that was generated in the pre-requisites. +- (Optional) GitHub Enterprise Server users can set the base URL to + `http(s)://HOSTNAME/api/v3`. +- (Optional) If GitHub Enterprise Server uses a private CA, include its bundle (root and any intermediates) in `ca.crt`. + If the `ca.crt` is specified, then it will be used for TLS verification for all API / Git over `HTTPS` requests to the GitHub Enterprise Server. + +**NOTE:** If the secret contains `tls.crt`, `tls.key` then [mutual TLS configuration](#https-mutual-tls-authentication) will be automatically enabled. +Omit these keys if the GitHub server does not support mutual TLS. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: github-sa +type: Opaque +stringData: + githubAppID: "" + githubAppInstallationID: "" + githubAppPrivateKey: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- + githubAppBaseURL: "" #optional, required only for GitHub Enterprise Server users + ca.crt: | #optional, for GitHub Enterprise Server users + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +Alternatively, the Flux CLI can be used to automatically create the secret with +the github app authentication information. + +```sh +flux create secret githubapp ghapp-secret \ + --app-id=1 \ + --app-installation-id=3 \ + --app-private-key=~/private-key.pem +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as GitRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `azure`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. For Azure DevOps specific setup, see the + [Azure DevOps integration guide](https://fluxcd.io/flux/integrations/azure/#for-azure-devops). + +**Note:** that for a publicly accessible git repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Git repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. 
+ +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple GitRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for Git operations +like cloning. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the Git reference to resolve and +watch for changes. References are specified in one or more subfields +(`.branch`, `.tag`, `.semver`, `.name`, `.commit`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to a `master` +branch reference. + +#### Branch example + +To Git checkout a specified branch, use `.spec.ref.branch`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: +``` + +This will perform a shallow clone to only fetch the specified branch. + +#### Tag example + +To Git checkout a specified tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + tag: +``` + +This field takes precedence over [`.branch`](#branch-example). 
+ +#### SemVer example + +To Git checkout a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.branch`](#branch-example) and +[`.tag`](#tag-example). + + +#### Name example + +To Git checkout a specified [reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References), +use `.spec.ref.name`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + # Ref name format reference: https://git-scm.com/docs/git-check-ref-format#_description + name: +``` + +Valid examples are: `refs/heads/main`, `refs/tags/v0.1.0`, `refs/pull/420/head`, +`refs/merge-requests/1/head`. + +This field takes precedence over [`.branch`](#branch-example), +[`.tag`](#tag-example), and [`.semver`](#semver-example). + +**Note:** Azure DevOps and AWS CodeCommit do not support fetching the HEAD of +a pull request. While Azure DevOps allows you to fetch the merge commit that +will be created after merging a PR (using `refs/pull//merge`), this field +can only be used to fetch references that exist in the current state of the Git +repository and not references that will be created in the future. + +#### Commit example + +To Git checkout a specified commit, use `.spec.ref.commit`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + commit: "" +``` + +This field takes precedence over all other fields. 
It can be combined with +`.spec.ref.branch` to perform a shallow clone of the branch, in which the +commit must exist: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: + commit: "" +``` + +### Verification + +`.spec.verify` is an optional field to enable the verification of Git commit +signatures. The field offers two subfields: + +- `.mode`, to specify what Git object(s) should be verified. Supported + values are: + - `HEAD`: Verifies the commit object pointed to by the HEAD of the repository + after performing a checkout via `.spec.ref`. + - `head`: Same as `HEAD`, supported for backwards compatibility purposes. + - `Tag`: Verifies the tag object pointed to by the specified/inferred tag + reference in `.spec.ref.tag`, `.spec.ref.semver` or `.spec.ref.name`. + - `TagAndHEAD`: Verifies the tag object pointed to by the specified/inferred tag + reference in `.spec.ref.tag`, `.spec.ref.semver` or `.spec.ref.name` and + the commit object pointed to by the tag. + +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the GitRepository. Containing the (PGP) public keys of trusted Git authors. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 1m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + verify: + mode: HEAD + secretRef: + name: pgp-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "True"` +- `reason: Succeeded` + +#### Verification Secret example + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgp-public-keys + namespace: default +type: Opaque +data: + author1.asc: + author2.asc: +``` + +Exporting armored public keys (`.asc` files) using `gpg`, and generating a +Secret: + +```sh +# Export armored public keys +gpg --export --armor 3CB12BA185C47B67 > author1.asc +gpg --export --armor 6A7436E8790F8689 > author2.asc +# Generate secret +kubectl create secret generic pgp-public-keys \ + --from-file=author1.asc \ + --from-file=author2.asc \ + -o yaml +``` + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Sparse checkout + +`.spec.sparseCheckout` is an optional field to specify list of directories to +checkout when cloning the repository. If specified, only the specified directory +contents will be present in the artifact produced for this repository. 
+ +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + sparseCheckout: + - charts + - kustomize +``` + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +GitRepository. When set to `true`, the controller will stop reconciling the +GitRepository, and changes to the resource or in the Git repository will not +result in a new Artifact. When the field is set to `false` or removed, it will +resume. + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all remote Git operations related to the GitRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +The proxy server must be either HTTP/S or SOCKS5. You can use a SOCKS5 proxy +with a HTTP/S Git repository url. + +Examples: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssh-proxy +type: Opaque +stringData: + address: socks5://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. 
+ +### Recurse submodules + +`.spec.recurseSubmodules` is an optional field to enable the initialization of +all submodules within the cloned Git repository, using their default settings. +This option defaults to `false`. + +Note that for most Git providers (e.g. GitHub and GitLab), deploy keys can not +be used as reusing a key across multiple repositories is not allowed. You have +to use either [HTTPS token-based authentication](#basic-access-authentication), +or an SSH key belonging to a (bot) user who has access to the main repository +and all submodules. + +### Include + +`.spec.include` is an optional field to map the contents of GitRepository +Artifacts into another. This may look identical to Git submodules but has +multiple benefits over regular submodules: + +- Including a `GitRepository` allows you to use different authentication + methods for different repositories. +- A change in the included repository will trigger an update of the including + repository. +- Multiple `GitRepository` objects could include the same repository, which + decreases the amount of cloning done compared to using submodules. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: include-example +spec: + include: + - repository: + name: other-repository + fromPath: deploy/kubernetes + toPath: base/app +``` + +The `.fromPath` and `.toPath` fields allow you to limit the files included, and +where they will be copied to. If you do not specify a value for `.fromPath`, +all files from the referenced GitRepository Artifact will be included. The +`.toPath` defaults to the `.repository.name` (e.g. `./other-repository/*`). + +## Working with GitRepositories + +### Excluding files + +By default, files which match the [default exclusion rules](#default-exclusions) +are excluded while archiving the Git repository contents as an Artifact. 
It is +possible to overwrite and/or overrule the default exclusions using a file in +the Git repository and/or an in-spec set of rules. + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the Git +repository. The `.sourceignore` file follows [the `.gitignore` pattern +format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +The controller recursively loads ignore files so a `.sourceignore` can be +placed in the repository root or in subdirectories. + +#### Ignore spec + +Another option is to define the exclusions within the GitRepository spec, using +the [`.spec.ignore` field](#ignore). Specified rules override the [default +exclusion list](#default-exclusions), and may overrule `.sourceignore` file +exclusions. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a GitRepository outside the +[specified interval window](#interval), a GitRepository can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the GitRepository for reconciliation if the `` differs +from the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). 
+ +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite gitrepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source git +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the GitRepository to reach +a [ready state](#ready-gitrepository) using `kubectl`: + +```sh +kubectl wait gitrepository/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a GitRepository, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend a GitRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source git +``` + +**Note:** When a GitRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +GitRepository's Status until it is resumed. + +#### Resume a GitRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. 
+ +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source git +``` + +### Debugging a GitRepository + +There are several ways to gather information about a GitRepository for +debugging purposes. + +#### Describe the GitRepository + +Describing a GitRepository using +`kubectl describe gitrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: False + Type: Ready + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning GitOperationFailed 2s (x9 over 4s) source-controller failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +#### Trace emitted Events + +To view events for specific GitRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running + +```sh +kubectl events --for GitRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact gitrepository/ stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' +36s Normal ArtifactUpToDate gitrepository/ artifact up-to-date with remote revision: 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' +94s Warning GitOperationFailed gitrepository/ failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific GitRepository, e.g. +`flux logs --level=error --kind=GitRepository --name=`. + +## GitRepository Status + +### Artifact + +The GitRepository reports the latest synchronized state from the Git repository +as an Artifact object in the `.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and +can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
+ +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +status: + artifact: + digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b + lastUpdateTime: "2022-01-29T06:59:23Z" + path: gitrepository///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a + size: 91318 + url: http://source-controller..svc.cluster.local./gitrepository///363a6a8fe6a7f13e05d34c163b0ef02a777da20a.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A GitRepository enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-gitrepository) while fetching the Git +state, it can be [ready](#ready-gitrepository), or it can [fail during +reconciliation](#failed-gitrepository). + +The GitRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the GitRepository to +become `Ready`. + +#### Reconciling GitRepository + +The source-controller marks a GitRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the GitRepository, or the reported Artifact + is determined to have disappeared from the storage. 
+- The generation of the GitRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact revision differs from the current Artifact. + +When the GitRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the GitRepository while their status value is `"True"`. + +#### Ready GitRepository + +The source-controller marks a GitRepository as _ready_ when it has the +following characteristics: + +- The GitRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote Git repository using + the current spec. +- The revision of the reported Artifact is up-to-date with the latest + resolved revision of the remote Git repository. + +When the GitRepository is "ready", the controller sets a Condition with the +following attributes in the GitRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +GitRepository is marked as [reconciling](#reconciling-gitrepository), or e.g. a +[transient error](#failed-gitrepository) occurs due to a temporary network issue. 
+ +When the GitRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +GitRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed GitRepository + +The source-controller may get stuck trying to produce an Artifact for a +GitRepository without completing. This can occur due to some of the following +factors: + +- The remote Git repository [URL](#url) is temporarily unavailable. +- The Git repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- A specified Include is unavailable. +- The verification of the Git commit signature failed. +- The credentials in the referenced Secret are invalid. +- The GitRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: GitOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the GitRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the +[verification of a Git commit signature](#verification) fails. 
A condition with +the following attributes is added to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "False"` +- `reason: Failed` + +While the GitRepository has one or more of these Conditions, the controller +will continue to attempt to produce an Artifact for the resource with an +exponential backoff, until it succeeds and the GitRepository is marked as +[ready](#ready-gitrepository). + +Note that a GitRepository can be [reconciling](#reconciling-gitrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the GitRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the GitRepository's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-gitrepository), or stalled due to error +it can not recover from without human intervention. +The value is the same as the [ignore in spec](#ignore). +It indicates the ignore rules used in building the current artifact in storage. +It is also used by the controller to determine if an artifact needs to be +rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + cue + pkg + ... +``` + +### Observed Recurse Submodules + +The source-controller reports an observed recurse submodule in the +GitRepository's `.status.observedRecurseSubmodules`. The observed recurse +submodules is the latest `.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[recurse submodules in spec](#recurse-submodules). 
It indicates the recurse
+submodules configuration used in building the current artifact in storage. It is
+also used by the controller to determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedRecurseSubmodules: true
+  ...
+```
+
+### Observed Include
+
+The source-controller reports observed include in the GitRepository's
+`.status.observedInclude`. The observed include is the latest
+`.spec.include` value which resulted in a
+[ready state](#ready-gitrepository), or stalled due to error it can not recover
+from without human intervention. The value is the same as the
+[include in spec](#include). It indicates the include configuration used in
+building the current artifact in storage. It is also used by the controller to
+determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedInclude:
+  - fromPath: deploy/webapp
+    repository:
+      name: repo1
+    toPath: foo
+  - fromPath: deploy/secure
+    repository:
+      name: repo2
+    toPath: bar
+  ...
+```
+
+### Observed Sparse Checkout
+
+The source-controller reports observed sparse checkout in the GitRepository's
+`.status.observedSparseCheckout`. The observed sparse checkout is the latest
+`.spec.sparseCheckout` value which resulted in a [ready
+state](#ready-gitrepository), or stalled due to error it can not recover from
+without human intervention. The value is the same as the [sparseCheckout in
+spec](#sparse-checkout). It indicates the sparse checkout configuration used in
+building the current artifact in storage. It is also used by the controller to
+determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedSparseCheckout:
+  - charts
+  - kustomize
+  ...
+```
+
+### Source Verification Mode
+
+The source-controller reports the Git object(s) it verified in the Git
+repository to create an artifact in the GitRepository's
+`.status.sourceVerificationMode`.
This value is the same as the [verification +mode in spec](#verification). The verification status is applicable only to the +latest Git repository revision used to successfully build and store an +artifact. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the GitRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-gitrepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/helmcharts.md b/docs/spec/v1/helmcharts.md new file mode 100644 index 000000000..eae4d5b9c --- /dev/null +++ b/docs/spec/v1/helmcharts.md @@ -0,0 +1,865 @@ +# Helm Charts + + + +The `HelmChart` API defines a Source to produce an Artifact for a Helm chart +archive with a set of specific configurations. + +## Example + +The following is an example of a HelmChart. 
It fetches and/or packages a Helm +chart and exposes it as a tarball (`.tgz`) Artifact for the specified +configuration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: '5.*' +``` + +In the above example: + +- A HelmChart named `podinfo` is created, indicated by the `.metadata.name` + field. +- The source-controller fetches the Helm chart every five minutes from the + `podinfo` HelmRepository source reference, indicated by the + `.spec.sourceRef.kind` and `.spec.sourceRef.name` fields. +- The fetched Helm chart version is the latest available chart + version in the range specified in `spec.version`. This version is also used as + Artifact revision, reported in-cluster in the `.status.artifact.revision` + field. +- When the current Helm Chart version differs from the latest available chart + in the version range, it is fetched and/or packaged as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmchart.yaml`. + +**Note:** HelmChart is usually used by the helm-controller. Based on the +HelmRelease configuration, an associated HelmChart is created by the +helm-controller. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmchart.yaml + ``` + +2. Run `kubectl get helmchart` to see the HelmChart: + + ```console + NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS + podinfo podinfo 5.* HelmRepository podinfo 53s True pulled 'podinfo' chart with version '5.2.1' + ``` + +3. 
Run `kubectl describe helmchart podinfo` to see the [Artifact](#artifact) and + [Conditions](#conditions) in the HelmChart's Status: + + ```console + Status: + Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Artifact: + Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 + Last Update Time: 2022-02-13T11:24:10Z + Path: helmchart/default/podinfo/podinfo-5.2.1.tgz + Revision: 5.2.1 + Size: 14166 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/podinfo-5.2.1.tgz + Conditions: + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: Ready + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: ArtifactInStorage + Observed Chart Name: podinfo + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ChartPullSucceeded 2m51s source-controller pulled 'podinfo' chart with version '5.2.1' + ``` + +## Writing a HelmChart spec + +As with all other Kubernetes config, a HelmChart needs `apiVersion`, `kind`, and +`metadata` fields. The name of a HelmChart object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmChart also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Source reference + +`.spec.sourceRef` is a required field that specifies a reference to the Source +the chart is available at. 
+
+Supported references are:
+- [`HelmRepository`](helmrepositories.md)
+- [`GitRepository`](gitrepositories.md)
+- [`Bucket`](buckets.md)
+
+Although there are three kinds of source references, there are only two
+underlying implementations. The artifact building process for `GitRepository`
+and `Bucket` is the same, as they are already built source artifacts. In case
+of `HelmRepository`, a chart is fetched and/or packaged based on the
+configuration of the Helm chart.
+
+For a `HelmChart` to be reconciled, the associated artifact in the source
+reference must be ready. If the source artifact is not ready, the `HelmChart`
+reconciliation is retried.
+
+When the `metadata.generation` of the `HelmChart` doesn't match the
+`status.observedGeneration`, the chart is fetched from source and/or packaged.
+If there's no `.spec.valuesFiles` specified, the chart is only fetched from the
+source, and not packaged. If `.spec.valuesFiles` are specified, the chart is
+fetched and packaged with the values files. When the `metadata.generation`
+matches the `status.observedGeneration`, the chart is only fetched from source
+or from the cache if available, and not packaged.
+
+When using a `HelmRepository` source reference, the secret reference defined in
+the Helm repository is used to fetch the chart.
+
+The HelmChart reconciliation behavior varies depending on the source reference
+kind, see [reconcile strategy](#reconcile-strategy).
+
+The attributes of the generated artifact also vary depending on the source
+reference kind, see [artifact](#artifact).
+
+### Chart
+
+`.spec.chart` is a required field that specifies the name or path the Helm chart
+is available at in the [Source reference](#source-reference).
+
+For `HelmRepository` Source reference, it'll be just the name of the chart.
+ +```yaml +spec: + chart: podinfo + sourceRef: + name: podinfo + kind: HelmRepository +``` + +For `GitRepository` and `Bucket` Source reference, it'll be the path to the +Helm chart directory. + +```yaml +spec: + chart: ./charts/podinfo + sourceRef: + name: podinfo + kind: +``` + +### Version + +`.spec.version` is an optional field to specify the version of the chart in +semver. It is applicable only when the Source reference is a `HelmRepository`. +It is ignored for `GitRepository` and `Bucket` Source reference. It defaults to +the latest version of the chart with value `*`. + +Version can be a fixed semver, minor or patch semver range of a specific +version (i.e. `4.0.x`) or any semver range (i.e. `>=4.0.0 <5.0.0`). + +### Values files + +`.spec.valuesFiles` is an optional field to specify an alternative list of +values files to use as the chart values (values.yaml). The file paths are +expected to be relative to the Source reference. Values files are merged in the +order of the list with the last file overriding the first. It is ignored when +omitted. When values files are specified, the chart is fetched and packaged +with the provided values. + +```yaml +spec: + chart: + spec: + chart: podinfo + ... + valuesFiles: + - values.yaml + - values-production.yaml +``` + +Values files also affect the generated artifact revision, see +[artifact](#artifact). + +### Ignore missing values files + +`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing +values files should be ignored rather than be considered errors. It defaults to +`false`. + +When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified, +the `.status.observedValuesFiles` field is populated with the list of values +files that were found and actually contributed to the packaged chart. + +### Reconcile strategy + +`.spec.reconcileStrategy` is an optional field to specify what enables the +creation of a new Artifact. Valid values are `ChartVersion` and `Revision`. 
+`ChartVersion` is used for creating a new artifact when the chart version +changes in a `HelmRepository`. `Revision` is used for creating a new artifact +when the source revision changes in a `GitRepository` or a `Bucket` Source. It +defaults to `ChartVersion`. + +**Note:** If the reconcile strategy is `ChartVersion` and the source reference +is a `GitRepository` or a `Bucket`, no new chart artifact is produced on updates +to the source unless the `version` in `Chart.yaml` is incremented. To produce +new chart artifact on change in source revision, set the reconcile strategy to +`Revision`. + +Reconcile strategy also affects the artifact version, see [artifact](#artifact) +for more details. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Helm Chart source must be checked for updates. + +After successfully reconciling a HelmChart object, the source-controller +requeues the object for inspection after the specified interval. The value must +be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the source for updates every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmChart objects are set +up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmChart. When set to `true`, the controller will stop reconciling the +HelmChart, and changes to the resource or the Helm chart Source will not result +in a new Artifact. When the field is set to `false` or removed, it will resume. 
+ +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +### Verification + +**Note:** This feature is available only for Helm charts fetched from an OCI Registry. + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes +secret with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify a HelmChart's signature. +This allows for older HelmCharts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available HelmCharts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying HelmCharts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + interval: 5m + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: ">=6.1.6" + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo +spec: + interval: 1m0s + url: oci://ghcr.io/stefanprodan/charts + type: "oci" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+  name: podinfo
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the HelmChart's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+## Working with HelmCharts
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmChart outside the
+[specified interval window](#interval), a HelmChart can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+ +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite helmchart/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the HelmChart to reach a +[ready state](#ready-helmchart) using `kubectl`: + +```sh +kubectl wait helmchart/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a HelmChart, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend a HelmChart + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch helmchart --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +**Note:** When a HelmChart has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +HelmChart's Status until it is resumed. + +#### Resume a HelmChart + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch helmchart --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +### Debugging a HelmChart + +There are several ways to gather information about a HelmChart for debugging +purposes. 
+ +#### Describe the HelmChart + +Describing a HelmChart using `kubectl describe helmchart ` displays +the latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: Stalled + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: False + Type: Ready + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: FetchFailed + Last Handled Reconcile At: 1644759954 + Observed Chart Name: podinfo + Observed Generation: 3 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning InvalidChartReference 11s source-controller invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with ver +sion matching '9.*' found +``` + +#### Trace emitted Events + +To view events for specific HelmChart(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running + +```sh +kubectl events --for HelmChart/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +22s Warning InvalidChartReference helmchart/ invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found +2s Normal ChartPullSucceeded helmchart/ pulled 'podinfo' chart with version '6.0.3' +2s Normal ArtifactUpToDate helmchart/ artifact up-to-date with remote revision: '6.0.3' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific HelmChart, e.g. `flux logs --level=error --kind=HelmChart --name=`. + +### Improving resource consumption by enabling the cache + +When using a `HelmRepository` as Source for a `HelmChart`, the controller loads +the repository index in memory to find the latest version of the chart. + +The controller can be configured to cache Helm repository indexes in memory. +The cache is used to avoid loading repository indexes for every `HelmChart` +reconciliation. + +The following flags are provided to enable and configure the cache: +- `helm-cache-max-size`: The maximum size of the cache in number of indexes. + If `0`, then the cache is disabled. +- `helm-cache-ttl`: The TTL of an index in the cache. +- `helm-cache-purge-interval`: The interval at which the cache is purged of + expired items. + +The caching strategy is to pull a repository index from the cache if it is +available, otherwise to load the index, retrieve and build the chart, +then cache the index. The cached index TTL is refreshed every time the +Helm repository index is loaded with the `helm-cache-ttl` value. + +The cache is purged of expired items every `helm-cache-purge-interval`. + +When the cache is full, no more items can be added to the cache, and the +source-controller will report a warning event instead. 
+ +In order to use the cache, set the related flags in the source-controller +Deployment config: + +```yaml + spec: + containers: + - args: + - --watch-all-namespaces + - --log-level=info + - --log-encoding=json + - --enable-leader-election + - --storage-path=/data + - --storage-adv-addr=source-controller.$(RUNTIME_NAMESPACE).svc.cluster.local. + ## Helm cache with up to 10 items, i.e. 10 indexes. + - --helm-cache-max-size=10 + ## TTL of an index is 1 hour. + - --helm-cache-ttl=1h + ## Purge expired index every 10 minutes. + - --helm-cache-purge-interval=10m +``` + +## HelmChart Status + +### Artifact + +The HelmChart reports the last built chart as an Artifact object in the +`.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`-.tgz`), +and can be retrieved in-cluster from the `.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e + lastUpdateTime: "2022-02-10T18:53:47Z" + path: helmchart///-.tgz + revision: 6.0.3 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-.tgz +``` + +When using a `HelmRepository` as the source reference and values files are +provided, the value of `status.artifact.revision` is the chart version combined +with the `HelmChart` object generation. For example, if the chart version is +`6.0.3` and the `HelmChart` object generation is `1`, the +`status.artifact.revision` value will be `6.0.3+1`. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+1.tgz + revision: 6.0.3+1 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+1.tgz + observedGeneration: 1 + ... +``` + +When using a `GitRepository` or a `Bucket` as the source reference and +`Revision` as the reconcile strategy, the value of `status.artifact.revision` is +the chart version combined with the first 12 characters of the revision of the +`GitRepository` or `Bucket`. For example if the chart version is `6.0.3` and the +revision of the `Bucket` is `4e5cbb7b97d00a8039b8810b90b922f4256fd3bd8f78b934b4892dae13f7ca87`, +the `status.artifact.revision` value will be `6.0.3+4e5cbb7b97d0`. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+4e5cbb7b97d0.tgz + revision: 6.0.3+4e5cbb7b97d0 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+4e5cbb7b97d0.tgz +``` + +### Conditions + +A HelmChart enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmchart) while fetching or building the +chart, it can be [ready](#ready-helmchart), it can +[fail during reconciliation](#failed-helmchart), or it can +[stall](#stalled-helmchart). + +The HelmChart API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmChart to become +`Ready`. 
+ +#### Reconciling HelmChart + +The source-controller marks a HelmChart as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the HelmChart, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the HelmChart is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmChart is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmChart's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new version, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewChart` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmChart while their status value is `"True"`. + +#### Ready HelmChart + +The source-controller marks a HelmChart as _ready_ when it has the following +characteristics: + +- The HelmChart reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch and build the Helm chart using the current + spec. +- The version/revision of the reported Artifact is up-to-date with the + latest version/revision of the Helm chart. + +When the HelmChart is "ready", the controller sets a Condition with the +following attributes in the HelmChart's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmChart is marked as [reconciling](#reconciling-helmchart), or e.g. +a [transient error](#failed-helmchart) occurs due to a temporary network issue. 
+ +When the HelmChart Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmChart's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmChart + +The source-controller may get stuck trying to produce an Artifact for a +HelmChart without completing. This can occur due to some of the following +factors: + +- The Helm chart Source is temporarily unavailable. +- The credentials in the [Source reference](#source-reference) Secret are + invalid. +- The HelmChart spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmChart's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: StorageOperationFailed` | `reason: URLInvalid` | `reason: IllegalPath` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmChart while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmChart has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmChart is marked as [ready](#ready-helmchart). + +Note that a HelmChart can be [reconciling](#reconciling-helmchart) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmChart spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. 
When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmChart + +The source-controller can mark a HelmChart as _stalled_ when it determines that +without changes to the spec, the reconciliation can not succeed. +For example because a HelmChart Version is set to a non-existing version. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmchart), but adds another Condition with the following +attributes to the HelmChart's `.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: InvalidChartReference` + +While the HelmChart has this Condition, the controller will not requeue the +resource any further, and will stop reconciling the resource until a change to +the spec is made. + +### Observed Source Artifact Revision + +The source-controller reports the revision of the last +[Source reference's](#source-reference) Artifact the current chart was fetched +from in the HelmChart's `.status.observedSourceArtifactRevision`. It is used to +keep track of the source artifact revision and detect when a new source +artifact is available. + +### Observed Chart Name + +The source-controller reports the last resolved chart name of the Artifact +for the [`.spec.chart` field](#chart) in the HelmChart's +`.status.observedChartName`. It is used to keep track of the chart and detect +when a new chart is found. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmChart's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-helmchart), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. 
+ +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/helmrepositories.md b/docs/spec/v1/helmrepositories.md new file mode 100644 index 000000000..97fdff2ec --- /dev/null +++ b/docs/spec/v1/helmrepositories.md @@ -0,0 +1,881 @@ +# Helm Repositories + + + +There are 2 [Helm repository types](#type) defined by the `HelmRepository` API: +- Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm +repository index YAML (`index.yaml`). +- OCI Helm repository, which defines a source that does not produce an Artifact. + It's a data container to store the information about the OCI repository that + can be used by [HelmChart](helmcharts.md) to access OCI Helm charts. + +## Examples + +### Helm HTTP/S repository + +The following is an example of a HelmRepository. It creates a YAML (`.yaml`) +Artifact from the fetched Helm repository index (in this example the [podinfo +repository](https://github.com/stefanprodan/podinfo)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://stefanprodan.github.io/podinfo +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller fetches the Helm repository index YAML every five + minutes from `https://stefanprodan.github.io/podinfo`, indicated by the + `.spec.interval` and `.spec.url` fields. 
+- The digest (algorithm defaults to SHA256) of the Helm repository index after + stable sorting the entries is used as Artifact revision, reported in-cluster + in the `.status.artifact.revision` field. +- When the current HelmRepository revision differs from the latest fetched + revision, it is stored as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + ``` + +3. Run `kubectl describe helmrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the HelmRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Last Update Time: 2022-02-04T09:55:58Z + Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Size: 40898 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Conditions: + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 1m source-controller fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' + ``` + +### Helm OCI repository + +The following is an example of an OCI HelmRepository. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + type: "oci" + interval: 5m0s + url: oci://ghcr.io/stefanprodan/charts +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- A HelmChart that refers to this HelmRepository uses the URL in the `.spec.url` + field to access the OCI Helm chart. 
+ +**NOTE:** The `.spec.interval` field is only used by the `default` Helm +repository and is ignored for any value in `oci` Helm repository. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/charts 3m22s + ``` + +Because the OCI Helm repository is a data container, there's nothing to report +for `READY` and `STATUS` columns above. The existence of the object can be +considered to be ready for use. + +## Writing a HelmRepository spec + +As with all other Kubernetes config, a HelmRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a HelmRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Type + +`.spec.type` is an optional field that specifies the Helm repository type. + +Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. + +**Note:**: For improved support for OCI Helm charts, please use the +[`OCIRepository`](ocirepositories.md) API. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used +for authentication purposes. + +Supported options are: +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when static credentials +are used for authentication. If you do not specify `.spec.provider`, it defaults +to `generic`. + +**Note**: The provider field is supported only for Helm OCI repositories. The `spec.type` +field must be set to `oci`. 
+ +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS worker +node IAM role or IAM Role for Service Accounts (IRSA), and by extension gain access +to ECR. + +##### EKS Worker Node IAM Role + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. + +##### IAM Role for Service Accounts (IRSA) + +When using IRSA to enable access to ECR, add the following patch to your bootstrap +repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running on +it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Azure Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes or +Workload Identity, and by extension gain access to GCR or Artifact Registry. + +##### Access Scopes + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and Artifact Registry, +source-controller running on it will also have access to them. 
+
+##### GKE Workload Identity
+
+When using Workload Identity to enable access to GCR or Artifact Registry, add the
+following patch to your bootstrap repository, in the `flux-system/kustomization.yaml`
+file:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - gotk-components.yaml
+  - gotk-sync.yaml
+patches:
+  - patch: |
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: source-controller
+        annotations:
+          iam.gke.io/gcp-service-account: <identity-name>
+    target:
+      kind: ServiceAccount
+      name: source-controller
+```
+
+The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts`
+that is located under the Artifact Registry Reader role. If you are using Google Container Registry service,
+the needed permission is instead `storage.objects.list` which can be bound as part
+of the Container Registry Service Agent role. Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
+for more information about setting up GKE Workload Identity.
+
+### Insecure
+
+`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP)
+container registry server, if set to `true`. The default value is `false`,
+denying insecure non-TLS connections when fetching Helm chart OCI artifacts.
+
+**Note**: The insecure field is supported only for Helm OCI repositories.
+The `spec.type` field must be set to `oci`.
+
+### Interval
+
+**Note:** This field is ineffectual for [OCI Helm
+Repositories](#helm-oci-repository).
+
+`.spec.interval` is an optional field that specifies the interval at which the
+Helm repository index must be consulted. When not set, the default value is
+`1m`.
+
+After successfully reconciling a HelmRepository object, the source-controller
+requeues the object for inspection after the specified interval. The value
+must be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
+e.g. 
`10m0s` to fetch the HelmRepository index YAML every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmRepository objects +are set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### URL + +`.spec.url` is a required field that depending on the [type of the HelmRepository object](#type) +specifies the HTTP/S or OCI address of a Helm repository. + +For OCI, the URL is expected to point to a registry repository, e.g. `oci://ghcr.io/fluxcd/source-controller`. + +For Helm repositories which require authentication, see [Secret reference](#secret-reference). + +### Timeout + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.timeout` is an optional field to specify a timeout for the fetch +operation. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. When not set, the +default value is `1m`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the HelmRepository, containing authentication +credentials for the repository. + +#### Basic access authentication + +To authenticate towards a Helm repository using basic access authentication +(in other words: using a username and password), the referenced Secret is +expected to contain `.data.username` and `.data.password` values. 
+ +For example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + secretRef: + name: example-user +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-user + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +OCI Helm repository example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/my-user/my-private-repo + type: "oci" + secretRef: + name: oci-creds +--- +apiVersion: v1 +kind: Secret +metadata: + name: oci-creds + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +For OCI Helm repositories, Kubernetes secrets of type [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) are also supported. +It is possible to create one such secret with `kubectl create secret docker-registry` +or using the Flux CLI: + +```yaml +flux create secret oci ghcr-auth \ + --url=ghcr.io \ + --username=flux \ + --password=${GITHUB_PAT} +``` + +**Warning:** Support for specifying TLS authentication data using this API has been +deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead. +If the controller uses the secret specified by this field to configure TLS, then +a deprecation warning will be logged. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a Helm repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. 
+specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Pass credentials + +`.spec.passCredentials` is an optional field to allow the credentials from the +[Secret reference](#secret-reference) to be passed on to a host that does not +match the host as defined in URL. This may for example be required if the host +advertised chart URLs in the index differ from the specified URL. + +Enabling this should be done with caution, as it can potentially result in +credentials getting stolen in a man-in-the-middle attack. This feature only applies +to HTTP/S Helm repositories. + +### Suspend + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmRepository. 
When set to `true`, the controller will stop reconciling the
+HelmRepository, and changes to the resource or the Helm repository index will
+not result in a new Artifact. When the field is set to `false` or removed, it
+will resume.
+
+For practical information, see
+[suspending and resuming](#suspending-and-resuming).
+
+## Working with HelmRepositories
+
+**Note:** This section does not apply to [OCI Helm
+Repositories](#helm-oci-repository), being a data container, once created, they
+are ready to be used by [HelmCharts](helmcharts.md).
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmRepository outside the
+[specified interval window](#interval), a HelmRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite helmrepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source helm <repository-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the HelmRepository to
+reach a [ready state](#ready-helmrepository) using `kubectl`:
+
+```sh
+kubectl wait helmrepository/<repository-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a HelmRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+ +#### Suspend a HelmRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source helm +``` + +**Note:** When a HelmRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +HelmRepository's Status until it is resumed. + +#### Resume a HelmRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source helm +``` + +### Debugging a HelmRepository + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), being a data container, they are static +objects that don't require debugging if valid. + +There are several ways to gather information about a HelmRepository for debugging +purposes. + +#### Describe the HelmRepository + +Describing a HelmRepository using `kubectl describe helmrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: Stalled + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: False + Type: Ready + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: FetchFailed + Observed Generation: 2 + URL: http://source-controller.source-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning Failed 6s source-controller failed to construct Helm client: scheme "invalid" not supported +``` + +#### Trace emitted Events + +To view events for specific HelmRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for HelmRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +107s Warning Failed helmrepository/ failed to construct Helm client: scheme "invalid" not supported +7s Normal NewArtifact helmrepository/ fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' +3s Normal ArtifactUpToDate helmrepository/ artifact up-to-date with remote revision: 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific HelmRepository, e.g. `flux logs --level=error --kind=HelmRepository --name=`. 
+ +## HelmRepository Status + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), they do not contain any information in the +status. + +### Artifact + +The HelmRepository reports the last fetched repository index as an Artifact +object in the `.status.artifact` of the resource. + +The Artifact file is an exact copy of the Helm repository index YAML +(`index-.yaml`) as fetched, and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +status: + artifact: + digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + lastUpdateTime: "2022-02-04T09:55:58Z" + path: helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + size: 40898 + url: http://source-controller.flux-system.svc.cluster.local./helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml +``` + +### Conditions + +A HelmRepository enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmrepository) while fetching the +repository index, it can be [ready](#ready-helmrepository), it can +[fail during reconciliation](#failed-helmrepository), or it can +[stall](#stalled-helmrepository). + +The HelmRepository API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmRepository to become +`Ready`. 
+ +#### Reconciling HelmRepository + +The source-controller marks a HelmRepository as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the HelmRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the HelmRepository is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmRepository while their status value is `"True"`. + +#### Ready HelmRepository + +The source-controller marks a HelmRepository as _ready_ when it has the following +characteristics: + +- The HelmRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch the Helm repository index using the current + spec. +- The revision of the reported Artifact is up-to-date with the latest + revision of the Helm repository. + +When the HelmRepository is "ready", the controller sets a Condition with the following +attributes in the HelmRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmRepository is marked as [reconciling](#reconciling-helmrepository), or e.g. 
+a [transient error](#failed-helmrepository) occurs due to a temporary network +issue. + +When the HelmRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmRepository + +The source-controller may get stuck trying to produce an Artifact for a +HelmRepository without completing. This can occur due to some of the following +factors: + +- The Helm repository [URL](#url) is temporarily unavailable. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The HelmRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: IndexationFailed` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmRepository has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmRepository is marked as [ready](#ready-helmrepository). 
+ +Note that a HelmRepository can be [reconciling](#reconciling-helmrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmRepository + +The source-controller can mark a HelmRepository as _stalled_ when it determines +that without changes to the spec, the reconciliation can not succeed. +For example because a Helm repository URL with an unsupported protocol is +specified. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmrepository), but adds another Condition with the following +attributes to the HelmRepository's +`.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: URLInvalid` + +While the HelmRepository has this Condition, the controller will not requeue +the resource any further, and will stop reconciling the resource until a change +to the spec is made. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-helmrepository), +or stalled due to error it can not recover from without human intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). 
+ +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/ocirepositories.md b/docs/spec/v1/ocirepositories.md new file mode 100644 index 000000000..d2bfa399e --- /dev/null +++ b/docs/spec/v1/ocirepositories.md @@ -0,0 +1,1147 @@ +# OCI Repositories + + + +The `OCIRepository` API defines a Source to produce an Artifact for an OCI +repository. + +## Example + +The following is an example of an OCIRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from an OCI repository for the +resolved digest. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: latest +``` + +In the above example: + +- An OCIRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the OCI repository every five minutes, indicated + by the `.spec.interval` field. +- It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` + repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. +- The resolved tag and SHA256 digest is used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current OCIRepository digest differs from the latest fetched + digest, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `ocirepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f ocirepository.yaml + ``` + +2. 
Run `kubectl get ocirepository` to see the OCIRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + ``` + +3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the OCIRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Last Update Time: 2025-06-14T11:23:36Z + Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 + URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Conditions: + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + ``` + +## Writing an OCIRepository spec + +As with all other Kubernetes config, an OCIRepository needs `apiVersion`, 
+`kind`, and `metadata` fields. The name of an OCIRepository object must be a
+valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
+
+An OCIRepository also needs a
+[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
+
+### URL
+
+`.spec.url` is a required field that specifies the address of the
+container image repository in the format `oci://<host>:<port>/<org-name>/<repo-name>`.
+
+**Note:** Specifying a tag or digest is not acceptable for this field.
+
+### Provider
+
+`.spec.provider` is an optional field that allows specifying an OIDC provider used for
+authentication purposes.
+
+Supported options are:
+
+- `generic`
+- `aws`
+- `azure`
+- `gcp`
+
+The `generic` provider can be used for public repositories or when
+static credentials are used for authentication, either with
+`spec.secretRef` or `spec.serviceAccountName`.
+If you do not specify `.spec.provider`, it defaults to `generic`.
+
+For a complete guide on how to set up authentication for cloud providers,
+see the integration [docs](/flux/integrations/).
+
+#### AWS
+
+The `aws` provider can be used to authenticate automatically using the EKS
+worker node IAM role or IAM Role for Service Accounts (IRSA), and by extension
+gain access to ECR.
+
+When the worker node IAM role has access to ECR, source-controller running on it
+will also have access to ECR.
+ +When using IRSA to enable access to ECR, add the following patch to your +bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running +on it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes +or Workload Identity, and by extension gain access to GCR or Artifact Registry. + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and +Artifact Registry, source-controller running on it will also have access to them. 
+ +When using Workload Identity to enable access to GCR or Artifact Registry, add +the following patch to your bootstrap repository, in the +`flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using +Google Container Registry service, the needed permission is instead `storage.objects.list` +which can be bound as part of the Container Registry Service Agent role. +Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the OCIRepository, containing authentication +credentials for the OCI repository. + +This secret is expected to be in the same format as [`imagePullSecrets`][image-pull-secrets]. +The usual way to create such a secret is with: + +```sh +kubectl create secret docker-registry ... +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as OCIRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. 
In this case, the controller
+  feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the
+  controller will error out.
+
+**Note:** For a publicly accessible image repository, you don't need to
+provide a `secretRef` or a `serviceAccountName`.
+
+For a complete guide on how to set up authentication for cloud providers,
+see the integration [docs](/flux/integrations/).
+
+### Mutual TLS Authentication
+
+`.spec.certSecretRef.name` is an optional field to specify a secret containing
+TLS certificate data for mutual TLS authentication.
+
+To authenticate towards an OCI repository using mutual TLS,
+the referenced Secret's `.data` should contain the following keys:
+
+* `tls.crt` and `tls.key`, to specify the client certificate and private key used
+for TLS client authentication. These must be used in conjunction, i.e.
+specifying one without the other will lead to an error.
+* `ca.crt`, to specify the CA certificate used to verify the server, which is
+required if the server is using a self-signed certificate.
+
+The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in
+the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. 
+ +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +OCI repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for OCI operations +like pulling. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the OCI reference to resolve and +watch for changes. 
References are specified in one or more subfields +(`.tag`, `.semver`, `.digest`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to the `latest` +tag. + +#### Tag example + +To pull a specific tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + tag: "" +``` + +#### SemVer example + +To pull a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.tag`](#tag-example). + +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. 
+ +#### Digest example + +To pull a specific digest, use `.spec.ref.digest`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + digest: "sha256:" +``` + +This field takes precedence over all other fields. + +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. + +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. + +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. 
+ +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. 
+ +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. + +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. 
+
+#### Notation
+
+The `notation` provider can be used to verify the signature of an OCI artifact using known
+trust policy and CA certificate.
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+### Suspend
+
+`.spec.suspend` is an optional field to suspend the reconciliation of an
+OCIRepository. When set to `true`, the controller will stop reconciling the
+OCIRepository, and changes to the resource or in the OCI repository will not
+result in a new Artifact. When the field is set to `false` or removed, it will
+resume.
+
+## Working with OCIRepositories
+
+### Excluding files
+
+By default, files which match the [default exclusion rules](#default-exclusions)
+are excluded while archiving the OCI repository contents as an Artifact.
It is possible to overwrite and/or overrule the default exclusions using
+the [`.spec.ignore` field](#ignore).
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  ignore: |
+    # exclude all
+    /*
+    # include deploy dir
+    !/deploy
+    # exclude file extensions from deploy dir
+    /deploy/**/*.md
+    /deploy/**/*.txt
+```
+
+#### `.sourceignore` file
+
+Excluding files is possible by adding a `.sourceignore` file in the artifact.
+The `.sourceignore` file follows [the `.gitignore` pattern
+format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern
+entries may overrule [default exclusions](#default-exclusions).
+
+The controller recursively loads ignore files so a `.sourceignore` can be
+placed in the artifact root or in subdirectories.
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile an OCIRepository outside the
+[specified interval window](#interval), an OCIRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the OCIRepository for reconciliation if the `<arbitrary value>` differs
+from the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite ocirepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source oci <repository-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the OCIRepository to reach
+a [ready state](#ready-ocirepository) using `kubectl`:
+
+```sh
+kubectl wait ocirepository/<repository-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of an OCIRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+
+#### Suspend an OCIRepository
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}'
+```
+
+Using `flux`:
+
+```sh
+flux suspend source oci <repository-name>
+```
+
+**Note:** When an OCIRepository has an Artifact and it is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+OCIRepository's Status until it is resumed.
+
+#### Resume an OCIRepository
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}'
+```
+
+Using `flux`:
+
+```sh
+flux resume source oci <repository-name>
+```
+
+### Debugging an OCIRepository
+
+There are several ways to gather information about an OCIRepository for
+debugging purposes.
+
+#### Describe the OCIRepository
+
+Describing an OCIRepository using
+`kubectl describe ocirepository <repository-name>`
+displays the latest recorded information for the resource in the `Status` and
+`Events` sections:
+
+```console
+...
+Status:
+...
+ Conditions: + Last Transition Time: 2025-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: False + Type: Ready + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning OCIOperationFailed 2s (x9 over 4s) source-controller failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +#### Trace emitted Events + +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. For +example, running + +```sh +kubectl events --for OCIRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. 
The Flux CLI offers commands for filtering the logs for a
+specific OCIRepository, e.g.
+`flux logs --level=error --kind=OCIRepository --name=<repository-name>`.
+
+## OCIRepository Status
+
+### Artifact
+
+The OCIRepository reports the latest synchronized state from the OCI repository
+as an Artifact object in the `.status.artifact` of the resource.
+
+The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact.
+
+The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the
+[OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md).
+If the OCI artifact was created with `flux push artifact`, then the `metadata` will contain the following
+annotations:
+- `org.opencontainers.image.created` the date and time on which the artifact was built
+- `org.opencontainers.image.source` the URL of the Git repository containing the source files
+- `org.opencontainers.image.revision` the Git branch and commit SHA1 of the source files
+
+The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and
+can be retrieved in-cluster from the `.status.artifact.url` HTTP address.
+
+#### Artifact example
+
+```yaml
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+status:
+  artifact:
+    digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088
+    lastUpdateTime: "2025-08-08T09:35:45Z"
+    metadata:
+      org.opencontainers.image.created: "2025-08-08T12:31:41+03:00"
+      org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872
+      org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git
+    path: ocirepository/<namespace>/<repository-name>/<digest>.tar.gz
+    revision: <tag>@<digest>
+    size: 1105
+    url: http://source-controller.<namespace>.svc.cluster.local./ocirepository/<namespace>/<repository-name>/<digest>.tar.gz
+```
+
+#### Default exclusions
+
+The following files and extensions are excluded from the Artifact by
+default:
+
+- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`)
+- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`)
+- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`)
+- CLI configs (`.goreleaser.yml, .sops.yaml`)
+- Flux v1 config (`.flux.yaml`)
+
+To define your own exclusion rules, see [excluding files](#excluding-files).
+
+### Conditions
+
+OCIRepository has various states during its lifecycle, reflected as
+[Kubernetes Conditions][typical-status-properties].
+It can be [reconciling](#reconciling-ocirepository) while fetching the remote
+state, it can be [ready](#ready-ocirepository), or it can [fail during
+reconciliation](#failed-ocirepository).
+
+The OCIRepository API is compatible with the [kstatus specification][kstatus-spec],
+and reports `Reconciling` and `Stalled` conditions where applicable to
+provide better (timeout) support to solutions polling the OCIRepository to
+become `Ready`.
+ +#### Reconciling OCIRepository + +The source-controller marks an OCIRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the OCIRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the OCIRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact digest differs from the current Artifact. + +When the OCIRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the OCIRepository while their status value is `"True"`. + +#### Ready OCIRepository + +The source-controller marks an OCIRepository as _ready_ when it has the +following characteristics: + +- The OCIRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote OCI repository using + the current spec. +- The digest of the reported Artifact is up-to-date with the latest + resolved digest of the remote OCI repository. + +When the OCIRepository is "ready", the controller sets a Condition with the +following attributes in the OCIRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. 
a +[transient error](#failed-ocirepository) occurs due to a temporary network issue. + +When the OCIRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +OCIRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed OCIRepository + +The source-controller may get stuck trying to produce an Artifact for a +OCIRepository without completing. This can occur due to some of the following +factors: + +- The remote OCI repository [URL](#url) is temporarily unavailable. +- The OCI repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The OCIRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the OCIRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: OCIArtifactPullFailed` | `reason: OCIArtifactLayerOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the OCIRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the signature +[verification](#verification) fails. 
A condition with
+the following attributes is added to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "False"`
+- `reason: VerificationError`
+
+While the OCIRepository has one or more of these Conditions, the controller
+will continue to attempt to produce an Artifact for the resource with an
+exponential backoff, until it succeeds and the OCIRepository is marked as
+[ready](#ready-ocirepository).
+
+Note that an OCIRepository can be [reconciling](#reconciling-ocirepository)
+while failing at the same time, for example due to a newly introduced
+configuration issue in the OCIRepository spec. When a reconciliation fails, the
+`Reconciling` Condition reason would be `ProgressingWithRetry`. When the
+reconciliation is performed again after the failure, the reason is updated to
+`Progressing`.
+
+### Observed Ignore
+
+The source-controller reports an observed ignore in the OCIRepository's
+`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
+which resulted in a [ready state](#ready-ocirepository), or stalled due to error
+it can not recover from without human intervention. The value is the same as the
+[ignore in spec](#ignore). It indicates the ignore rules used in building the
+current artifact in storage. It is also used by the controller to determine if
+an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedIgnore: |
+    hpa.yaml
+    build
+  ...
+```
+
+### Observed Layer Selector
+
+The source-controller reports an observed layer selector in the OCIRepository's
+`.status.observedLayerSelector`. The observed layer selector is the latest
+`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository),
+or stalled due to error it can not recover from without human intervention.
+The value is the same as the [layer selector in spec](#layer-selector).
+It indicates the layer selection configuration used in building the current
+artifact in storage. 
It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip + operation: copy + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the OCIRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +[image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/docs/spec/v1alpha1/helmrepositories.md b/docs/spec/v1alpha1/helmrepositories.md index 966460dfd..e2d1bfc2f 100644 --- a/docs/spec/v1alpha1/helmrepositories.md +++ b/docs/spec/v1alpha1/helmrepositories.md @@ -19,7 +19,7 @@ type HelmRepositorySpec struct { // repository. // For HTTP/S basic auth the secret must contain username and // password fields. 
- // For TLS the secret must contain caFile, keyFile and caCert + // For TLS the secret must contain caFile, keyFile and caFile // fields. // +optional SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` diff --git a/docs/spec/v1beta1/helmrepositories.md b/docs/spec/v1beta1/helmrepositories.md index e00fd674e..c194a72a0 100644 --- a/docs/spec/v1beta1/helmrepositories.md +++ b/docs/spec/v1beta1/helmrepositories.md @@ -20,7 +20,7 @@ type HelmRepositorySpec struct { // For HTTP/S basic auth the secret must contain username and // password fields. // For TLS the secret must contain a certFile and keyFile, and/or - // caCert fields. + // caFile fields. // +optional SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` diff --git a/docs/spec/v1beta2/buckets.md b/docs/spec/v1beta2/buckets.md index 307cd03e0..a78516f88 100644 --- a/docs/spec/v1beta2/buckets.md +++ b/docs/spec/v1beta2/buckets.md @@ -1,5 +1,7 @@ # Buckets + + The `Bucket` API defines a Source to produce an Artifact for objects from storage solutions like Amazon S3, Google Cloud Storage buckets, or any other solution with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. @@ -48,8 +50,8 @@ In the above example: - A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag) in the `.spec.bucketName` bucket is compiled, while filtering the keys using [default ignore rules](#default-exclusions). -- The SHA256 sum of the list is used as Artifact revision, reported - in-cluster in the `.status.artifact.revision` field. +- The digest (algorithm defaults to SHA256) of the list is used as Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. - When the current Bucket revision differs from the latest calculated revision, all objects are fetched and archived. - The new Artifact is reported in the `.status.artifact` field. @@ -71,7 +73,7 @@ control over. 
```console NAME ENDPOINT AGE READY STATUS - minio-bucket minio.example.com 34s True stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` 3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) @@ -81,20 +83,21 @@ control over. ... Status: Artifact: - Checksum: 72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 Last Update Time: 2022-02-01T23:43:38Z Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz - Revision: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz Conditions: Last Transition Time: 2022-02-01T23:43:38Z - Message: stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' Observed Generation: 1 Reason: Succeeded Status: True Type: Ready Last Transition Time: 2022-02-01T23:43:38Z - Message: stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' Observed Generation: 1 Reason: Succeeded Status: True @@ -104,7 +107,7 @@ control over. 
Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal NewArtifact 82s source-controller fetched 16 files from 'example' + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket ``` ## Writing a Bucket spec @@ -277,6 +280,7 @@ Without a [Secret reference](#secret-reference), authentication using a chain with: - [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.3.0-beta.4#WorkloadIdentityCredential) - [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) with the `AZURE_CLIENT_ID` - Managed Identity with a system-assigned identity @@ -433,22 +437,103 @@ data: accountKey: ``` -#### Managed Identity with AAD Pod Identity +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` -If you are using [aad pod identity](https://azure.github.io/aad-pod-identity/docs), you can create an identity that has access to Azure Storage. +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). 
+ +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testsas + endpoint: https://testfluxsas.blob.core.windows.net +``` + +##### Deprecated: Managed Identity with AAD Pod Identity + +If you are using [aad pod identity](https://azure.github.io/aad-pod-identity/docs), +You need to create an Azure Identity and give it access to Azure Blob Storage. ```sh export IDENTITY_NAME="blob-access" -az role assignment create --role "Storage Blob Data Contributor" \ ---assignee-object-id "$(az identity show -n blob-access -o tsv --query principalId -g $RESOURCE_GROUP)" \ ---scope "/subscriptions//resourceGroups/aks-somto/providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" export IDENTITY_CLIENT_ID="$(az identity show -n ${IDENTITY_NAME} -g ${RESOURCE_GROUP} -otsv --query clientId)" export IDENTITY_RESOURCE_ID="$(az identity show -n ${IDENTITY_NAME} -otsv --query id)" ``` -Create an `AzureIdentity` object that references the identity created above: +Create an AzureIdentity object that references the identity created above: ```yaml --- @@ -463,7 +548,8 @@ spec: type: 0 # user-managed identity ``` -Create an `AzureIdentityBinding` object that binds pods with a specific selector with the `AzureIdentity` created: +Create an AzureIdentityBinding object that binds Pods with a specific selector +with the AzureIdentity created: ```yaml apiVersion: "aadpodidentity.k8s.io/v1" @@ -475,7 +561,7 @@ spec: selector: ${IDENTITY_NAME} ``` -Label the source-controller correctly so that it can match an identity binding: +Label the 
source-controller Deployment correctly so that it can match an identity binding: ```yaml apiVersion: apps/v1 @@ -490,7 +576,8 @@ spec: aadpodidbinding: ${IDENTITY_NAME} # match the AzureIdentity name ``` -If you have set aad-pod-identity up correctly and labeled the source-controller pod, then you don't need to reference a secret. +If you have set up aad-pod-identity correctly and labeled the source-controller +Deployment, then you don't need to reference a Secret. ```yaml apiVersion: source.toolkit.fluxcd.io/v1beta2 @@ -532,13 +619,28 @@ data: sasKey: ``` -The sasKey only contains the SAS token e.g `?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. -The leading question mark is optional. -The query values from the `sasKey` data field in the Secrets gets merged with the ones in the `spec.endpoint` of the `Bucket`. -If the same key is present in the both of them, the value in the `sasKey` takes precedence. +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. + +The minimum permissions for an account-level SAS token are: -Note that the Azure SAS Token has an expiry date and it should be updated before it expires so that Flux can -continue to access Azure Storage. 
+- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. #### GCP @@ -622,7 +724,7 @@ Where the (base64 decoded) value of `.data.serviceaccount` looks like this: ### Interval -`.spec.interval` is a required field that specifices the interval which the +`.spec.interval` is a required field that specifies the interval which the object storage bucket must be consulted at. After successfully reconciling a Bucket object, the source-controller requeues @@ -631,7 +733,12 @@ the object for inspection after the specified interval. The value must be in a e.g. `10m0s` to look at the object storage bucket every 10 minutes. If the `.metadata.generation` of a resource changes (due to e.g. the apply of a -change to the spec), this is handled instantly outside of the interval window. +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). ### Endpoint @@ -642,6 +749,83 @@ HTTP endpoint requires enabling [`.spec.insecure`](#insecure). Some endpoints require the specification of a [`.spec.region`](#region), see [Provider](#provider) for more (provider specific) examples. +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. 
By adding this field, one may specify the
+STS endpoint from where temporary credentials will be fetched.
+
+This field is only supported for the `aws` and `generic` bucket [providers](#provider).
+
+If using `.spec.sts`, the following fields are required:
+
+- `.spec.sts.provider`, the Security Token Service provider. The only supported
+  option for the `generic` bucket provider is `ldap`. The only supported option
+  for the `aws` bucket provider is `aws`.
+- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In
+  the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS
+  Endpoint, or an Interface Endpoint created inside a VPC. In the case of
+  `ldap` this must be the LDAP server endpoint.
+
+When using the `ldap` provider, the following fields may also be specified:
+
+- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP
+  credentials. The Secret must contain the following keys:
+  - `username`, the username to authenticate with.
+  - `password`, the password to authenticate with.
+- `.spec.sts.certSecretRef.name`, the name of the Secret containing the
+  TLS configuration for communicating with the STS endpoint. The contents
+  of this Secret must follow the same structure of
+  [`.spec.certSecretRef.name`](#cert-secret-reference).
+
+If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified,
+the proxy configuration will be used for communicating with the STS endpoint. 
+ +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + ### Bucket name `.spec.bucketName` is a required field that specifies which object storage @@ -656,6 +840,100 @@ See [Provider](#provider) for more (provider specific) examples. See [Provider](#provider) for more (provider specific) examples. +### Cert secret reference + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + ### Insecure `.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) @@ -678,6 +956,15 @@ credentials for the object storage. For some `.spec.provider` implementations the presence of the field is required, see [Provider](#provider) for more details and examples. +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [provider](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + ### Ignore `.spec.ignore` is an optional field to specify rules in [the `.gitignore` @@ -741,7 +1028,7 @@ spec: ### Triggering a reconcile -To manually tell the source-controller to reconcile a Bucket outside of the +To manually tell the source-controller to reconcile a Bucket outside the [specified interval window](#interval), a Bucket can be annotated with `reconcile.fluxcd.io/requestedAt: `. Annotating the resource queues the Bucket for reconciliation if the `` differs from @@ -854,9 +1141,9 @@ Status: ... 
Conditions: Last Transition Time: 2022-02-02T13:26:55Z - Message: reconciling new object generation (2) + Message: processing object: new generation 1 -> 2 Observed Generation: 2 - Reason: NewGeneration + Reason: ProgressingWithRetry Status: True Type: Reconciling Last Transition Time: 2022-02-02T13:26:55Z @@ -881,12 +1168,12 @@ Events: #### Trace emitted Events -To view events for specific Bucket(s), `kubectl get events` can be used in -combination with `--field-sector` to list the Events for specific objects. -For example, running +To view events for specific Bucket(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running ```sh -kubectl get events --field-selector involvedObject.kind=Bucket,involvedObject.name= +kubectl events --for Bucket/ ``` lists @@ -894,7 +1181,7 @@ lists ```console LAST SEEN TYPE REASON OBJECT MESSAGE 2m30s Normal NewArtifact bucket/ fetched 16 files with revision from 'my-new-bucket' -36s Normal ArtifactUpToDate bucket/ artifact up-to-date with remote revision: 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +36s Normal ArtifactUpToDate bucket/ artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 18s Warning BucketOperationFailed bucket/ bucket 'my-new-bucket' does not exist ``` @@ -923,10 +1210,11 @@ metadata: name: status: artifact: - checksum: cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a + digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a lastUpdateTime: "2022-01-28T10:30:30Z" path: bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz - revision: c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + size: 38099 url: 
http://source-controller..svc.cluster.local./bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz ``` @@ -966,13 +1254,13 @@ is true: - The generation of the Bucket is newer than the [Observed Generation](#observed-generation). - The newly calculated Artifact revision differs from the current Artifact. -When the Bucket is "reconciling", the `Ready` Condition status becomes `False`, -and the controller adds a Condition with the following attributes to the -Bucket's `.status.conditions`: +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: - `type: Reconciling` - `status: "True"` -- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision` +- `reason: Progressing` | `reason: ProgressingWithRetry` If the reconciling state is due to a new revision, an additional Condition is added with the following attributes: @@ -1050,7 +1338,28 @@ it succeeds and the Bucket is marked as [ready](#ready-bucket). Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at the same time, for example due to a newly introduced configuration issue in the -Bucket spec. +Bucket spec. When a reconciliation fails, the `Reconciling` Condition reason +would be `ProgressingWithRetry`. When the reconciliation is performed again +after the failure, the reason is updated to `Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the Bucket's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-bucket), or stalled due to error +it can not recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. + +Example: +```yaml +status: + ... 
+ observedIgnore: | + hpa.yaml + build + ... +``` ### Observed Generation diff --git a/docs/spec/v1beta2/gitrepositories.md b/docs/spec/v1beta2/gitrepositories.md index a25569422..03ffbeb82 100644 --- a/docs/spec/v1beta2/gitrepositories.md +++ b/docs/spec/v1beta2/gitrepositories.md @@ -1,5 +1,7 @@ # Git Repositories + + The `GitRepository` API defines a Source to produce an Artifact for a Git repository revision. @@ -49,7 +51,7 @@ You can run this example by saving the manifest into `gitrepository.yaml`. ```console NAME URL AGE READY STATUS - podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master/132f4e719209eb10b9485302f8593fc0e680f4fc' + podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' ``` 3. Run `kubectl describe gitrepository podinfo` to see the [Artifact](#artifact) @@ -59,20 +61,21 @@ You can run this example by saving the manifest into `gitrepository.yaml`. ... 
Status: Artifact: - Checksum: 95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2 + Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2 Last Update Time: 2022-02-14T11:23:36Z Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz - Revision: master/132f4e719209eb10b9485302f8593fc0e680f4fc + Revision: master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc + Size: 91318 URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz Conditions: Last Transition Time: 2022-02-14T11:23:36Z - Message: stored artifact for revision 'master/132f4e719209eb10b9485302f8593fc0e680f4fc' + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' Observed Generation: 1 Reason: Succeeded Status: True Type: Ready Last Transition Time: 2022-02-14T11:23:36Z - Message: stored artifact for revision 'master/132f4e719209eb10b9485302f8593fc0e680f4fc' + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' Observed Generation: 1 Reason: Succeeded Status: True @@ -131,6 +134,31 @@ data: password: ``` +#### Bearer token authentication + +To authenticate towards a Git repository over HTTPS using bearer token +authentication (in other words: using a `Authorization: Bearer` header), the referenced +Secret is expected to contain the token in `.data.bearerToken`. + +**Note:** If you are looking to use OAuth tokens with popular servers (e.g. +[GitHub](https://docs.github.com/en/rest/overview/authenticating-to-the-rest-api?apiVersion=2022-11-28#authenticating-with-a-token-generated-by-an-app), +[Bitbucket](https://support.atlassian.com/bitbucket-cloud/docs/using-access-tokens/), +[GitLab](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#clone-using-a-token)), +you should use basic access authentication instead. 
These servers use basic HTTP +authentication, with the OAuth token as the password. Check the documentation of +your Git server for details. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: bearer-token-auth +type: Opaque +data: + bearerToken: +``` + #### HTTPS Certificate Authority To provide a Certificate Authority to trust while connecting with a Git @@ -209,7 +237,7 @@ is `60s`. `.spec.ref` is an optional field to specify the Git reference to resolve and watch for changes. References are specified in one or more subfields -(`.branch`, `.tag`, `.semver`, `.commit`), with latter listed fields taking +(`.branch`, `.tag`, `.semver`, `.name`, `.commit`), with latter listed fields taking precedence over earlier ones. If not specified, it defaults to a `master` branch reference. @@ -228,8 +256,7 @@ spec: branch: ``` -Using the [`go-git` Git implementation](#git-implementation), this will perform -a shallow clone to only fetch the specified branch. +This will perform a shallow clone to only fetch the specified branch. #### Tag example @@ -269,6 +296,30 @@ spec: This field takes precedence over [`.branch`](#branch-example) and [`.tag`](#tag-example). + +#### Name example + +To Git checkout a specfied [reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References), +use `.spec.ref.name`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + # Ref name format reference: https://git-scm.com/docs/git-check-ref-format#_description + name: +``` + +Valid examples are: `refs/heads/main`, `refs/tags/v0.1.0`, `refs/pull/420/head`, +`refs/merge-requests/1/head`. + +This field takes precedence over [`.branch`](#branch-example), +[`.tag`](#tag-example), and [`.semver`](#semver-example). + #### Commit example To Git checkout a specified commit, use `.spec.ref.commit`: @@ -284,9 +335,9 @@ spec: commit: "" ``` -This field takes precedence over all other fields. 
Using the [`go-git` Git -implementation](#git-implementation), it can be combined with `.spec.ref.branch` -to perform a shallow clone of the branch, in which the commit must exist: +This field takes precedence over all other fields. It can be combined with +`.spec.ref.branch` to perform a shallow clone of the branch, in which the +commit must exist: ```yaml --- @@ -385,31 +436,13 @@ resume. ### Git implementation -`.spec.gitImplementation` is an optional field to change the client library -implementation used for Git operations (e.g. clone, checkout). The default -value is `go-git`. - -Unless you need support for a specific Git wire protocol functionality not -supported by the default implementation (as documented below), changing the -implementation is generally not recommended as it can come with its own set of -drawbacks. For example, not being able to make use of shallow clones forces the -controller to fetch the whole Git history tree instead of a specific one, -resulting in an increase of disk space and traffic usage. - -| Git Implementation | Shallow Clones | Git Submodules | V2 Protocol Support | -|--------------------|----------------|----------------|---------------------| -| `go-git` | true | true | false | -| `libgit2` | false | false | true | - -Some Git providers like Azure DevOps _require_ the `libgit2` implementation, as -their Git servers provide only support for the -[v2 protocol](https://git-scm.com/docs/protocol-v2). +`.spec.gitImplementation` is deprecated and its value ignored, the git +implementation used across Flux is go-git. #### Optimized Git clones Optimized Git clones decreases resource utilization for GitRepository -reconciliations. It supports both `go-git` and `libgit2` implementations -when cloning repositories using branches or tags. +reconciliations. 
When enabled, it avoids full Git clone operations by first checking whether the revision of the last stored artifact is still the head of the remote @@ -427,20 +460,13 @@ not affected by this functionality. #### Proxy support When a proxy is configured in the source-controller Pod through the appropriate -environment variables, for example `HTTPS_PROXY`, `NO_PROXY`, etc. There may be -some limitations in the proxy support based on the Git implementation. - -| Git Implementation | HTTP_PROXY | HTTPS_PROXY | NO_PROXY | Self-signed Certs | -|--------------------|------------|-------------|----------|-------------------| -| `go-git` | true | true | true | false |n -| `libgit2` | true | true | true | true | +environment variables, for example `HTTPS_PROXY`, `NO_PROXY`, etc. ### Recurse submodules `.spec.recurseSubmodules` is an optional field to enable the initialization of all submodules within the cloned Git repository, using their default settings. -This option is only available when using the (default) `go-git` [Git -implementation](#git-implementation), and defaults to `false`. +This option defaults to `false`. Note that for most Git providers (e.g. GitHub and GitLab), deploy keys can not be used as reusing a key across multiple repositories is not allowed. You have @@ -496,6 +522,9 @@ repository. The `.sourceignore` file follows [the `.gitignore` pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern entries may overrule [default exclusions](#default-exclusions). +The controller recursively loads ignore files so a `.sourceignore` can be +placed in the repository root or in subdirectories. + #### Ignore spec Another option is to define the exclusions within the GitRepository spec, using @@ -636,9 +665,9 @@ Status: ... 
Conditions: Last Transition Time: 2022-02-14T09:40:27Z - Message: reconciling new object generation (2) + Message: processing object: new generation 1 -> 2 Observed Generation: 2 - Reason: NewGeneration + Reason: ProgressingWithRetry Status: True Type: Reconciling Last Transition Time: 2022-02-14T09:40:27Z @@ -676,7 +705,7 @@ lists ```console LAST SEEN TYPE REASON OBJECT MESSAGE 2m14s Normal NewArtifact gitrepository/ stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' -36s Normal ArtifactUpToDate gitrepository/ artifact up-to-date with remote revision: 'master/132f4e719209eb10b9485302f8593fc0e680f4fc' +36s Normal ArtifactUpToDate gitrepository/ artifact up-to-date with remote revision: 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' 94s Warning GitOperationFailed gitrepository/ failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" ``` @@ -705,10 +734,11 @@ metadata: name: status: artifact: - checksum: e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b + digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b lastUpdateTime: "2022-01-29T06:59:23Z" path: gitrepository///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz - revision: master/363a6a8fe6a7f13e05d34c163b0ef02a777da20a + revision: master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a + size: 91318 url: http://source-controller..svc.cluster.local./gitrepository///363a6a8fe6a7f13e05d34c163b0ef02a777da20a.tar.gz ``` @@ -750,12 +780,13 @@ following is true: - The newly resolved Artifact revision differs from the current Artifact. 
When the GitRepository is "reconciling", the `Ready` Condition status becomes -`False`, and the controller adds a Condition with the following attributes to -the GitRepository's `.status.conditions`: +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the GitRepository's +`.status.conditions`: - `type: Reconciling` - `status: "True"` -- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision` +- `reason: Progressing` | `reason: ProgressingWithRetry` If the reconciling state is due to a new revision, an additional Condition is added with the following attributes: @@ -845,7 +876,10 @@ exponential backoff, until it succeeds and the GitRepository is marked as Note that a GitRepository can be [reconciling](#reconciling-gitrepository) while failing at the same time, for example due to a newly introduced -configuration issue in the GitRepository spec. +configuration issue in the GitRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. ### Content Configuration Checksum @@ -854,6 +888,79 @@ configurations of the GitRepository that indicate a change in source and records it in `.status.contentConfigChecksum`. This field is used to determine if the source artifact needs to be rebuilt. +**Deprecation Note:** `contentConfigChecksum` is no longer used and will be +removed in the next API version. The individual components used for generating +content configuration checksum now have explicit fields in the status. This +makes the observations used by the controller for making artifact rebuild +decisions more transparent and easier to debug. + +### Observed Ignore + +The source-controller reports an observed ignore in the GitRepository's +`.status.observedIgnore`. 
The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-gitrepository), or stalled due to error +it can not recover from without human intervention. +The value is the same as the [ignore in spec](#ignore). +It indicates the ignore rules used in building the current artifact in storage. +It is also used by the controller to determine if an artifact needs to be +rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + cue + pkg + ... +``` + +### Observed Recurse Submodules + +The source-controller reports an observed recurse submodule in the +GitRepository's `.status.observedRecurseSubmodules`. The observed recurse +submodules is the latest `.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[recurse submodules in spec](#recurse-submodules). It indicates the recurse +submodules configuration used in building the current artifact in storage. It is +also used by the controller to determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedRecurseSubmodules: true + ... +``` + +### Observed Include + +The source-controller reports observed include in the GitRepository's +`.status.observedInclude`. The observed include is the latest +`.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[include in spec](#include). It indicates the include configuration used in +building the current artifact in storage. It is also used by the controller to +determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedInclude: + - fromPath: deploy/webapp + repository: + name: repo1 + toPath: foo + - fromPath: deploy/secure + repository: + name: repo2 + toPath: bar + ... 
+``` + ### Observed Generation The source-controller reports an [observed generation][typical-status-properties] diff --git a/docs/spec/v1beta2/helmcharts.md b/docs/spec/v1beta2/helmcharts.md index b423dde6d..3932a9694 100644 --- a/docs/spec/v1beta2/helmcharts.md +++ b/docs/spec/v1beta2/helmcharts.md @@ -1,5 +1,7 @@ # Helm Charts + + The `HelmChart` API defines a Source to produce an Artifact for a Helm chart archive with a set of specific configurations. @@ -43,7 +45,7 @@ In the above example: You can run this example by saving the manifest into `helmchart.yaml`. -**NOTE:** HelmChart is usually used by the helm-controller. Based on the +**Note:** HelmChart is usually used by the helm-controller. Based on the HelmRelease configuration, an associated HelmChart is created by the helm-controller. @@ -65,12 +67,13 @@ helm-controller. ```console Status: - Observed Source Artifact Revision: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 Artifact: - Checksum: 6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 + Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 Last Update Time: 2022-02-13T11:24:10Z Path: helmchart/default/podinfo/podinfo-5.2.1.tgz Revision: 5.2.1 + Size: 14166 URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/podinfo-5.2.1.tgz Conditions: Last Transition Time: 2022-02-13T11:24:10Z @@ -199,6 +202,16 @@ spec: Values files also affect the generated artifact revision, see [artifact](#artifact). +### Ignore missing values files + +`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing +values files should be ignored rather than be considered errors. It defaults to +`false`. 
+ +When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified, +the `.status.observedValuesFiles` field is populated with the list of values +files that were found and actually contributed to the packaged chart. + ### Reconcile strategy `.spec.reconcileStrategy` is an optional field to specify what enables the @@ -208,7 +221,7 @@ changes in a `HelmRepository`. `Revision` is used for creating a new artifact when the source revision changes in a `GitRepository` or a `Bucket` Source. It defaults to `ChartVersion`. -**NOTE:** If the reconcile strategy is `ChartVersion` and the source reference +**Note:** If the reconcile strategy is `ChartVersion` and the source reference is a `GitRepository` or a `Bucket`, no new chart artifact is produced on updates to the source unless the `version` in `Chart.yaml` is incremented. To produce new chart artifact on change in source revision, set the reconcile strategy to @@ -230,6 +243,11 @@ e.g. `10m0s` to look at the source for updates every 10 minutes. If the `.metadata.generation` of a resource changes (due to e.g. applying a change to the spec), this is handled instantly outside the interval window. +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmChart objects are set +up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + ### Suspend `.spec.suspend` is an optional field to suspend the reconciliation of a @@ -240,6 +258,174 @@ in a new Artifact. When the field is set to `false` or removed, it will resume. For practical information, see [suspending and resuming](#suspending-and-resuming). +### Verification + +**Note:** This feature is available only for Helm charts fetched from an OCI Registry. 
+ +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes +secret with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. 
+ +Flux will loop over the public keys and use them to verify a HelmChart's signature. +This allows for older HelmCharts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available HelmCharts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying HelmCharts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + interval: 5m + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: ">=6.1.6" + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo +spec: + interval: 1m0s + url: oci://ghcr.io/stefanprodan/charts + type: "oci" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: notation + secretRef: + name: notation-config +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +containing Certificate Authority (CA) root certificates and the a `trust policy` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: notation-config +type: Opaque +data: + certificate1.pem: + certificate2.crt: + trustpolicy.json: +``` + +Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must +be named `trustpolicy.json` for Flux to make use of them. + +For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md). + +Flux will loop over the certificates and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right certificate is in the secret. + ## Working with HelmCharts ### Triggering a reconcile @@ -369,12 +555,12 @@ sion matching '9.*' found #### Trace emitted Events -To view events for specific HelmChart(s), `kubectl get events` can be used in -combination with `--field-selector` to list the Events for specific objects. -For example, running +To view events for specific HelmChart(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running ```sh -kubectl get events --field-selector involvedObject.kind=HelmChart,involvedObject.name= +kubectl events --for HelmChart/ ``` lists @@ -457,10 +643,11 @@ metadata: name: status: artifact: - checksum: e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e + digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e lastUpdateTime: "2022-02-10T18:53:47Z" path: helmchart///-.tgz revision: 6.0.3 + size: 14166 url: http://source-controller.flux-system.svc.cluster.local./helmchart///-.tgz ``` @@ -478,10 +665,11 @@ metadata: name: status: artifact: - checksum: ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 + digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 lastUpdateTime: "2022-02-28T08:07:12Z" path: helmchart///-6.0.3+1.tgz revision: 6.0.3+1 + size: 14166 url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+1.tgz observedGeneration: 1 ... @@ -502,10 +690,11 @@ metadata: name: status: artifact: - checksum: 8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 + digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 lastUpdateTime: "2022-02-28T08:07:12Z" path: helmchart///-6.0.3+4e5cbb7b97d0.tgz revision: 6.0.3+4e5cbb7b97d0 + size: 14166 url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+4e5cbb7b97d0.tgz ``` @@ -536,12 +725,12 @@ following is true: - The newly fetched Artifact revision differs from the current Artifact. 
When the HelmChart is "reconciling", the `Ready` Condition status becomes -`False`, and the controller adds a Condition with the following attributes to -the HelmChart's `.status.conditions`: +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmChart's `.status.conditions`: - `type: Reconciling` - `status: "True"` -- `reason: NewGeneration` | `reason: NoArtifact` +- `reason: Progressing` | `reason: ProgressingWithRetry` If the reconciling state is due to a new version, it adds an additional Condition with the following attributes: @@ -618,7 +807,10 @@ until it succeeds and the HelmChart is marked as [ready](#ready-helmchart). Note that a HelmChart can be [reconciling](#reconciling-helmchart) while failing at the same time, for example due to a newly introduced -configuration issue in the HelmChart spec. +configuration issue in the HelmChart spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. #### Stalled HelmChart diff --git a/docs/spec/v1beta2/helmrepositories.md b/docs/spec/v1beta2/helmrepositories.md index a77902882..0fd33ed00 100644 --- a/docs/spec/v1beta2/helmrepositories.md +++ b/docs/spec/v1beta2/helmrepositories.md @@ -1,11 +1,13 @@ # Helm Repositories + + There are 2 [Helm repository types](#type) defined by the `HelmRepository` API: - Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm repository index YAML (`index.yaml`). -- OCI Helm repository, which defines a source that does not produce an Artifact. -Instead a validation of the Helm repository is performed and the outcome is reported in the -`.status.conditions` field. +- OCI Helm repository, which defines a source that does not produce an Artifact. 
+ It's a data container to store the information about the OCI repository that + can be used by [HelmChart](helmcharts.md) to access OCI Helm charts. ## Examples @@ -34,9 +36,9 @@ In the above example: - The source-controller fetches the Helm repository index YAML every five minutes from `https://stefanprodan.github.io/podinfo`, indicated by the `.spec.interval` and `.spec.url` fields. -- The SHA256 sum of the Helm repository index after stable sorting the entries - is used as Artifact revision, reported in-cluster in the - `.status.artifact.revision` field. +- The digest (algorithm defaults to SHA256) of the Helm repository index after + stable sorting the entries is used as Artifact revision, reported in-cluster + in the `.status.artifact.revision` field. - When the current HelmRepository revision differs from the latest fetched revision, it is stored as a new Artifact. - The new Artifact is reported in the `.status.artifact` field. @@ -53,7 +55,7 @@ You can run this example by saving the manifest into `helmrepository.yaml`. ```console NAME URL AGE READY STATUS - podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision '83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' ``` 3. Run `kubectl describe helmrepository podinfo` to see the [Artifact](#artifact) @@ -63,20 +65,21 @@ You can run this example by saving the manifest into `helmrepository.yaml`. ... 
Status: Artifact: - Checksum: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 Last Update Time: 2022-02-04T09:55:58Z Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml - Revision: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Size: 40898 URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml Conditions: Last Transition Time: 2022-02-04T09:55:58Z - Message: stored artifact for revision '83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' Observed Generation: 1 Reason: Succeeded Status: True Type: Ready Last Transition Time: 2022-02-04T09:55:58Z - Message: stored artifact for revision '83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' Observed Generation: 1 Reason: Succeeded Status: True @@ -110,9 +113,11 @@ In the above example: - A HelmRepository named `podinfo` is created, indicated by the `.metadata.name` field. -- The source-controller performs the Helm repository url validation i.e. the url -is a valid OCI registry url, every five minutes with the information indicated by the -`.spec.interval` and `.spec.url` fields. +- A HelmChart that refers to this HelmRepository uses the URL in the `.spec.url` + field to access the OCI Helm chart. + +**NOTE:** The `.spec.interval` field is only used by the `default` Helm +repository and is ignored for any value in `oci` Helm repository. 
You can run this example by saving the manifest into `helmrepository.yaml`. @@ -126,25 +131,12 @@ You can run this example by saving the manifest into `helmrepository.yaml`. ```console NAME URL AGE READY STATUS - podinfo oci://ghcr.io/stefanprodan/charts 3m22s True Helm repository "podinfo" is ready + podinfo oci://ghcr.io/stefanprodan/charts 3m22s ``` -3. Run `kubectl describe helmrepository podinfo` to see the [Conditions](#conditions) -in the HelmRepository's Status: - - ```console - ... - Status: - Conditions: - Last Transition Time: 2022-05-12T14:02:12Z - Message: Helm repository "podinfo" is ready - Observed Generation: 1 - Reason: Succeeded - Status: True - Type: Ready - Observed Generation: 1 - Events: - ``` +Because the OCI Helm repository is a data container, there's nothing to report +for `READY` and `STATUS` columns above. The existence of the object can be +considered to be ready for use. ## Writing a HelmRepository spec @@ -155,17 +147,221 @@ valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working- A HelmRepository also needs a [`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). - ### Type `.spec.type` is an optional field that specifies the Helm repository type. Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used +for authentication purposes. + +Supported options are: +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when static credentials +are used for authentication. If you do not specify `.spec.provider`, it defaults +to `generic`. + +**Note**: The provider field is supported only for Helm OCI repositories. The `spec.type` +field must be set to `oci`. 
+ +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS worker +node IAM role or IAM Role for Service Accounts (IRSA), and by extension gain access +to ECR. + +##### EKS Worker Node IAM Role + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. + +##### IAM Role for Service Accounts (IRSA) + +When using IRSA to enable access to ECR, add the following patch to your bootstrap +repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity, Kubelet Managed +Identity or Azure Active Directory pod-managed identity (aad-pod-identity), and +by extension gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running on +it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Azure Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +##### Deprecated: AAD Pod Identity + +**Warning:** The AAD Pod Identity project will be archived in +[September 2023](https://github.com/Azure/aad-pod-identity#-announcement), +and you are advised to use Workload Identity instead. 
+ +When using aad-pod-identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + - op: add + path: /spec/template/metadata/labels/aadpodidbinding + value: + target: + kind: Deployment + name: source-controller +``` + +When using pod-managed identity on an AKS cluster, AAD Pod Identity has to be used +to give the `source-controller` pod access to the ACR. To do this, you have to install +`aad-pod-identity` on your cluster, create a managed identity that has access to the +container registry (this can also be the Kubelet identity if it has `AcrPull` role +assignment on the ACR), create an `AzureIdentity` and `AzureIdentityBinding` that describe +the managed identity and then label the `source-controller` deployment with the name of the +AzureIdentity as shown in the patch above. Please take a look at [this guide](https://azure.github.io/aad-pod-identity/docs/) +or [this one](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) +if you want to use AKS pod-managed identities add-on that is in preview. + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes or +Workload Identity, and by extension gain access to GCR or Artifact Registry. + +##### Access Scopes + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and Artifact Registry, +source-controller running on it will also have access to them. 
+
+##### GKE Workload Identity
+
+When using Workload Identity to enable access to GCR or Artifact Registry, add the
+following patch to your bootstrap repository, in the `flux-system/kustomization.yaml`
+file:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - gotk-components.yaml
+  - gotk-sync.yaml
+patches:
+  - patch: |
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: source-controller
+        annotations:
+          iam.gke.io/gcp-service-account: <identity-name>
+    target:
+      kind: ServiceAccount
+      name: source-controller
+```
+
+The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts`
+that is located under the Artifact Registry Reader role. If you are using Google Container Registry service,
+the needed permission is instead `storage.objects.list` which can be bound as part
+of the Container Registry Service Agent role. Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
+for more information about setting up GKE Workload Identity.
+
+### Insecure
+
+`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP)
+container registry server, if set to `true`. The default value is `false`,
+denying insecure non-TLS connections when fetching Helm chart OCI artifacts.
+
+**Note**: The insecure field is supported only for Helm OCI repositories.
+The `spec.type` field must be set to `oci`.
+
 ### Interval
 
-`.spec.interval` is a required field that specifies the interval which the
-Helm repository index must be consulted at.
+**Note:** This field is ineffectual for [OCI Helm
+Repositories](#helm-oci-repository).
+
+`.spec.interval` is an optional field that specifies the interval which the
+Helm repository index must be consulted at. When not set, the default value is
+`1m`.
 
 After successfully reconciling a HelmRepository object, the source-controller
 requeues the object for inspection after the specified interval.
The value @@ -175,6 +371,11 @@ e.g. `10m0s` to fetch the HelmRepository index YAML every 10 minutes. If the `.metadata.generation` of a resource changes (due to e.g. applying a change to the spec), this is handled instantly outside the interval window. +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmRepository objects +are set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + ### URL `.spec.url` is a required field that depending on the [type of the HelmRepository object](#type) @@ -186,11 +387,14 @@ For Helm repositories which require authentication, see [Secret reference](#secr ### Timeout +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + `.spec.timeout` is an optional field to specify a timeout for the fetch operation. The value must be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), -e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value -is `60s`. +e.g. `1m30s` for a timeout of one minute and thirty seconds. When not set, the +default value is `1m`. 
 ### Secret reference
 
@@ -225,8 +429,8 @@ metadata:
   name: example-user
   namespace: default
 stringData:
-  username: example
-  password: 123456
+  username: "user-123456"
+  password: "pass-123456"
 ```
 
 OCI Helm repository example:
 
@@ -240,7 +444,7 @@ metadata:
   namespace: default
 spec:
   interval: 5m0s
-  url: oci://ghcr.io/stefanprodan/charts
+  url: oci://ghcr.io/my-user/my-private-repo
   type: "oci"
   secretRef:
     name: oci-creds
@@ -251,19 +455,51 @@ metadata:
   name: oci-creds
   namespace: default
 stringData:
-  username: example
-  password: 123456
+  username: "user-123456"
+  password: "pass-123456"
 ```
 
-#### TLS authentication
+For OCI Helm repositories, Kubernetes secrets of type [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) are also supported.
+It is possible to create one such secret with `kubectl create secret docker-registry`
+or using the Flux CLI:
 
-**Note:** TLS authentication is not yet supported by OCI Helm repositories.
+```sh
+flux create secret oci ghcr-auth \
+  --url=ghcr.io \
+  --username=flux \
+  --password=${GITHUB_PAT}
+```
 
-To provide TLS credentials to use while connecting with the Helm repository,
-the referenced Secret is expected to contain `.data.certFile` and
-`.data.keyFile`, and/or `.data.caFile` values.
+**Warning:** Support for specifying TLS authentication data using this API has been
+deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead.
+If the controller uses the secret specified by this field to configure TLS, then
+a deprecation warning will be logged.
 
-For example:
+### Cert secret reference
+
+`.spec.certSecretRef.name` is an optional field to specify a secret containing
+TLS certificate data. The secret can contain the following keys:
+
+* `tls.crt` and `tls.key`, to specify the client certificate and private key used
+for TLS client authentication. These must be used in conjunction, i.e.
+specifying one without the other will lead to an error.
+* `ca.crt`, to specify the CA certificate used to verify the server, which is
+required if the server is using a self-signed certificate.
+
+If the server is using a self-signed certificate and has TLS client
+authentication enabled, all three values are required.
+
+The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in
+the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have
+three files: `client.key`, `client.crt` and `ca.crt` for the client private key,
+client certificate and the CA certificate respectively, you can generate the
+required Secret using the `flux create secret tls` command:
+
+```sh
+flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt
+```
+
+Example usage:
 
 ```yaml
 ---
@@ -275,7 +511,7 @@ metadata:
 spec:
   interval: 5m0s
   url: https://example.com
-  secretRef:
+  certSecretRef:
     name: example-tls
 ---
 apiVersion: v1
@@ -283,11 +519,12 @@ kind: Secret
 metadata:
   name: example-tls
   namespace: default
+type: kubernetes.io/tls # or Opaque
 data:
-  certFile: <BASE64>
-  keyFile: <BASE64>
+  tls.crt: <BASE64>
+  tls.key: <BASE64>
   # NOTE: Can be supplied without the above values
-  caFile: <BASE64>
+  ca.crt: <BASE64>
 ```
 
 ### Pass credentials
@@ -303,6 +540,9 @@ to HTTP/S Helm repositories.
 
 ### Suspend
 
+**Note:** This field is not applicable to [OCI Helm
+Repositories](#helm-oci-repository).
+
 `.spec.suspend` is an optional field to suspend the reconciliation of a
 HelmRepository. When set to `true`, the controller will stop reconciling the
 HelmRepository, and changes to the resource or the Helm repository index will
@@ -313,6 +553,10 @@ For practical information, see
 [suspending and resuming](#suspending-and-resuming).
 
 ## Working with HelmRepositories
+
+**Note:** This section does not apply to [OCI Helm
+Repositories](#helm-oci-repository), being a data container, once created, they
+are ready to be used by [HelmCharts](helmcharts.md).
### Triggering a reconcile @@ -414,6 +658,10 @@ flux resume source helm ### Debugging a HelmRepository +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), being a data container, they are static +objects that don't require debugging if valid. + There are several ways to gather information about a HelmRepository for debugging purposes. @@ -456,12 +704,12 @@ Events: #### Trace emitted Events -To view events for specific HelmRepository(s), `kubectl get events` can be used in -combination with `--field-sector` to list the Events for specific objects. -For example, running +To view events for specific HelmRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running ```sh -kubectl get events --field-selector involvedObject.kind=HelmRepository,involvedObject.name= +kubectl events --for HelmRepository/ ``` lists @@ -470,7 +718,7 @@ lists LAST SEEN TYPE REASON OBJECT MESSAGE 107s Warning Failed helmrepository/ failed to construct Helm client: scheme "invalid" not supported 7s Normal NewArtifact helmrepository/ fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' -3s Normal ArtifactUpToDate helmrepository/ artifact up-to-date with remote revision: '83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' +3s Normal ArtifactUpToDate helmrepository/ artifact up-to-date with remote revision: 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' ``` Besides being reported in Events, the reconciliation errors are also logged by @@ -479,9 +727,11 @@ specific HelmRepository, e.g. `flux logs --level=error --kind=HelmRepository --n ## HelmRepository Status -### Artifact +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), they do not contain any information in the +status. 
-**Note:** This section does not apply to [OCI Helm Repositories](#oci-helm-repositories), they do not emit artifacts. +### Artifact The HelmRepository reports the last fetched repository index as an Artifact object in the `.status.artifact` of the resource. @@ -500,10 +750,11 @@ metadata: name: status: artifact: - checksum: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 lastUpdateTime: "2022-02-04T09:55:58Z" path: helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml - revision: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + size: 40898 url: http://source-controller.flux-system.svc.cluster.local./helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml ``` @@ -522,9 +773,6 @@ and reports `Reconciling` and `Stalled` conditions where applicable to provide better (timeout) support to solutions polling the HelmRepository to become `Ready`. - OCI Helm repositories use only `Reconciling`, `Ready`, `FetchFailed`, and `Stalled` - condition types. - #### Reconciling HelmRepository The source-controller marks a HelmRepository as _reconciling_ when one of the following @@ -537,12 +785,12 @@ is true: - The newly fetched Artifact revision differs from the current Artifact. 
When the HelmRepository is "reconciling", the `Ready` Condition status becomes -`False`, and the controller adds a Condition with the following attributes to -the HelmRepository's `.status.conditions`: +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmRepository's `.status.conditions`: - `type: Reconciling` - `status: "True"` -- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision` +- `reason: Progressing` | `reason: ProgressingWithRetry` If the reconciling state is due to a new revision, it adds an additional Condition with the following attributes: @@ -621,7 +869,10 @@ until it succeeds and the HelmRepository is marked as [ready](#ready-helmreposit Note that a HelmRepository can be [reconciling](#reconciling-helmrepository) while failing at the same time, for example due to a newly introduced -configuration issue in the HelmRepository spec. +configuration issue in the HelmRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. #### Stalled HelmRepository @@ -658,5 +909,6 @@ annotation value it acted on in the `.status.lastHandledReconcileAt` field. For practical information about this field, see [triggering a reconcile](#triggering-a-reconcile). 
+[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail [typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties [kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1beta2/ocirepositories.md b/docs/spec/v1beta2/ocirepositories.md index d540d8131..eb5de4c5f 100644 --- a/docs/spec/v1beta2/ocirepositories.md +++ b/docs/spec/v1beta2/ocirepositories.md @@ -1,5 +1,7 @@ # OCI Repositories + + The `OCIRepository` API defines a Source to produce an Artifact for an OCI repository. @@ -31,7 +33,7 @@ In the above example: by the `.spec.interval` field. - It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. -- The resolved SHA256 digest is used as the Artifact +- The resolved tag and SHA256 digest is used as the Artifact revision, reported in-cluster in the `.status.artifact.revision` field. - When the current OCIRepository digest differs from the latest fetched digest, a new Artifact is archived. @@ -49,7 +51,7 @@ You can run this example by saving the manifest into `ocirepository.yaml`. ```console NAME URL AGE READY STATUS - podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with digest '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' ``` 3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) @@ -59,20 +61,21 @@ You can run this example by saving the manifest into `ocirepository.yaml`. ... 
Status: Artifact: - Checksum: d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b Last Update Time: 2022-06-14T11:23:36Z Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz - Revision: 3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz Conditions: Last Transition Time: 2022-06-14T11:23:36Z - Message: stored artifact for digest '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' Observed Generation: 1 Reason: Succeeded Status: True Type: Ready Last Transition Time: 2022-06-14T11:23:36Z - Message: stored artifact for digest '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' Observed Generation: 1 Reason: Succeeded Status: True @@ -82,7 +85,7 @@ You can run this example by saving the manifest into `ocirepository.yaml`. Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal NewArtifact 62s source-controller stored artifact with digest '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' ``` ## Writing an OCIRepository spec @@ -154,14 +157,23 @@ to the IAM role when using IRSA. 
#### Azure -The `azure` provider can be used to authenticate automatically using kubelet -managed identity or Azure Active Directory pod-managed identity (aad-pod-identity), -and by extension gain access to ACR. +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity When the kubelet managed identity has access to ACR, source-controller running on it will also have access to ACR. -When using aad-pod-identity to enable access to ECR, add the following patch to +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). + +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to your bootstrap repository, in the `flux-system/kustomization.yaml` file: ```yaml @@ -171,25 +183,36 @@ resources: - gotk-components.yaml - gotk-sync.yaml patches: - - patch: | - - op: add - path: /spec/template/metadata/labels/aadpodidbinding - value: - target: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 kind: Deployment - name: source-controller -``` - -When using pod-managed identity on an AKS cluster, AAD Pod Identity -has to be used to give the `source-controller` pod access to the ACR. 
-To do this, you have to install `aad-pod-identity` on your cluster, create a managed identity -that has access to the container registry (this can also be the Kubelet identity -if it has `AcrPull` role assignment on the ACR), create an `AzureIdentity` and `AzureIdentityBinding` -that describe the managed identity and then label the `source-controller` pods -with the name of the AzureIdentity as shown in the patch above. Please take a look -at [this guide](https://azure.github.io/aad-pod-identity/docs/) or -[this one](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) -if you want to use AKS pod-managed identities add-on that is in preview. + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). #### GCP @@ -220,7 +243,7 @@ patches: target: kind: ServiceAccount name: source-controller -``` +``` The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` that is located under the Artifact Registry Reader role. If you are using @@ -251,42 +274,109 @@ fetch the image pull secrets attached to the service account and use them for au **Note:** that for a publicly accessible image repository, you don't need to provide a `secretRef` nor `serviceAccountName`. 
-### TLS Certificates +### Cert secret reference -`.spec.certSecretRef` field names a secret with TLS certificate data. This is for two separate -purposes: +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: -- to provide a client certificate and private key, if you use a certificate to authenticate with - the container registry; and, -- to provide a CA certificate, if the registry uses a self-signed certificate. +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. -These will often go together, if you are hosting a container registry yourself. All the files in the -secret are expected to be [PEM-encoded][pem-encoding]. This is an ASCII format for certificates and -keys; `openssl` and such tools will typically give you an option of PEM output. +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. -Assuming you have obtained a certificate file and private key and put them in the files `client.crt` -and `client.key` respectively, you can create a secret with `kubectl` like this: +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: -```bash -kubectl create secret generic tls-certs \ - --from-file=certFile=client.crt \ - --from-file=keyFile=client.key +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt ``` -You could also [prepare a secret and encrypt it][sops-guide]; the important bit is that the data -keys in the secret are `certFile` and `keyFile`. +Example usage: -If you have a CA certificate for the client to use, the data key for that is `caFile`. Adapting the -previous example, if you have the certificate in the file `ca.crt`, and the client certificate and -key as before, the whole command would be: +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` -```bash -kubectl create secret generic tls-certs \ - --from-file=certFile=client.crt \ - --from-file=keyFile=client.key \ - --from-file=caFile=ca.crt +**Warning:** Support for the `caFile`, `certFile` and `keyFile` keys have been +deprecated. If you have any Secrets using these keys and specified in an +OCIRepository, the controller will log a deprecation warning. + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. 
+The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu ``` +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + ### Interval `.spec.interval` is a required field that specifies the interval at which the @@ -300,6 +390,11 @@ e.g. `10m0s` to reconcile the object every 10 minutes. If the `.metadata.generation` of a resource changes (due to e.g. a change to the spec), this is handled instantly outside the interval window. +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. 
For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + ### Timeout `.spec.timeout` is an optional field to specify a timeout for OCI operations @@ -351,6 +446,37 @@ spec: This field takes precedence over [`.tag`](#tag-example). +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. + #### Digest example To pull a specific digest, use `.spec.ref.digest`: @@ -364,10 +490,39 @@ metadata: spec: ref: digest: "sha256:" -``` +``` This field takes precedence over all other fields. +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. 
+ +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/deployment.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. + +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + ### Ignore `.spec.ignore` is an optional field to specify rules in [the `.gitignore` @@ -379,6 +534,159 @@ list](#default-exclusions), and may overrule the [`.sourceignore` file exclusions](#sourceignore-file). See [excluding files](#excluding-files) for more information. +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. 
+- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. 
+- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. + +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: OCIRepository
+metadata:
+  name: 
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: 
+  certificate2.crt: 
+  trustpolicy.json: 
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
 ### Suspend
 
 `.spec.suspend` is an optional field to suspend the reconciliation of a
@@ -413,6 +721,16 @@ spec:
   /deploy/**/*.txt
 ```
 
+#### `.sourceignore` file
+
+Excluding files is possible by adding a `.sourceignore` file in the artifact.
+The `.sourceignore` file follows [the `.gitignore` pattern
+format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern
+entries may overrule [default exclusions](#default-exclusions).
+
+The controller recursively loads ignore files so a `.sourceignore` can be
+placed in the artifact root or in subdirectories.
+ ### Triggering a reconcile To manually tell the source-controller to reconcile a OCIRepository outside the @@ -437,7 +755,7 @@ flux reconcile source oci ### Waiting for `Ready` When a change is applied, it is possible to wait for the OCIRepository to reach -a [ready state](#ready-gitrepository) using `kubectl`: +a [ready state](#ready-ocirepository) using `kubectl`: ```sh kubectl wait gitrepository/ --for=condition=ready --timeout=1m @@ -529,9 +847,9 @@ Status: ... Conditions: Last Transition Time: 2022-02-14T09:40:27Z - Message: reconciling new object generation (2) + Message: processing object: new generation 1 -> 2 Observed Generation: 2 - Reason: NewGeneration + Reason: ProgressingWithRetry Status: True Type: Reconciling Last Transition Time: 2022-02-14T09:40:27Z @@ -556,20 +874,20 @@ Events: #### Trace emitted Events -To view events for specific OCIRepository(s), `kubectl get events` can be used -in combination with `--field-sector` to list the Events for specific objects. -For example, running +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. 
For +example, running ```sh -kubectl get events --field-selector involvedObject.kind=OCIRepository,involvedObject.name= +kubectl events --for OCIRepository/ ``` lists ```console LAST SEEN TYPE REASON OBJECT MESSAGE -2m14s Normal NewArtifact ocirepository/ stored artifact for digest '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' -36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote digest: '3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' 94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" ``` @@ -585,7 +903,7 @@ specific OCIRepository, e.g. The OCIRepository reports the latest synchronized state from the OCI repository as an Artifact object in the `.status.artifact` of the resource. -The `.status.artifact.revision` holds the SHA256 digest of the upstream OCI artifact. +The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact. The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the [OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md). 
@@ -607,14 +925,15 @@ metadata: name: status: artifact: - checksum: 9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 + digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 lastUpdateTime: "2022-08-08T09:35:45Z" metadata: org.opencontainers.image.created: "2022-08-08T12:31:41+03:00" org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872 org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git path: ocirepository///.tar.gz - revision: + revision: @ + size: 1105 url: http://source-controller..svc.cluster.local./ocirepository///.tar.gz ``` @@ -656,12 +975,12 @@ following is true: - The newly resolved Artifact digest differs from the current Artifact. When the OCIRepository is "reconciling", the `Ready` Condition status becomes -`False`, and the controller adds a Condition with the following attributes to -the OCIRepository's `.status.conditions`: +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: - `type: Reconciling` - `status: "True"` -- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision` +- `reason: Progressing` | `reason: ProgressingWithRetry` If the reconciling state is due to a new revision, an additional Condition is added with the following attributes: @@ -693,8 +1012,8 @@ following attributes in the OCIRepository's `.status.conditions`: - `reason: Succeeded` This `Ready` Condition will retain a status value of `"True"` until the -OCIRepository is marked as [reconciling](#reconciling-gitrepository), or e.g. a -[transient error](#failed-gitrepository) occurs due to a temporary network issue. +OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. a +[transient error](#failed-ocirepository) occurs due to a temporary network issue. 
When the OCIRepository Artifact is archived in the controller's Artifact storage, the controller sets a Condition with the following attributes in the @@ -734,6 +1053,14 @@ and is only present on the OCIRepository while the status value is `"True"`. There may be more arbitrary values for the `reason` field to provide accurate reason for a condition. +In addition to the above Condition types, when the signature +[verification](#verification) fails. A condition with +the following attributes is added to the GitRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "False"` +- `reason: VerificationError` + While the OCIRepository has one or more of these Conditions, the controller will continue to attempt to produce an Artifact for the resource with an exponential backoff, until it succeeds and the OCIRepository is marked as @@ -741,7 +1068,10 @@ exponential backoff, until it succeeds and the OCIRepository is marked as Note that a OCIRepository can be [reconciling](#reconciling-ocirepository) while failing at the same time, for example due to a newly introduced -configuration issue in the OCIRepository spec. +configuration issue in the OCIRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. ### Content Configuration Checksum @@ -750,6 +1080,53 @@ configurations of the OCIRepository that indicate a change in source and records it in `.status.contentConfigChecksum`. This field is used to determine if the source artifact needs to be rebuilt. +**Deprecation Note:** `contentConfigChecksum` is no longer used and will be +removed in the next API version. The individual components used for generating +content configuration checksum now have explicit fields in the status. 
This +makes the observations used by the controller for making artifact rebuild +decisions more transparent and easier to debug. + +### Observed Ignore + +The source-controller reports an observed ignore in the OCIRepository's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-ocirepository), or stalled due to error +it can not recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. It is also used by the controller to determine if +an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Layer Selector + +The source-controller reports an observed layer selector in the OCIRepository's +`.status.observedLayerSelector`. The observed layer selector is the latest +`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human intervention. +The value is the same as the [layer selector in spec](#layer-selector). +It indicates the layer selection configuration used in building the current +artifact in storage. It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip + operation: copy + ... +``` + ### Observed Generation The source-controller reports an [observed generation][typical-status-properties] @@ -769,6 +1146,6 @@ reconcile](#triggering-a-reconcile). 
[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties [kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus [image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod -[image-auto-provider-secrets]: https://fluxcd.io/docs/guides/image-update/#imagerepository-cloud-providers-authentication +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication [pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail -[sops-guide]: https://fluxcd.io/docs/guides/mozilla-sops/ +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/go.mod b/go.mod index 1991567ee..21c15753e 100644 --- a/go.mod +++ b/go.mod @@ -1,281 +1,429 @@ module github.com/fluxcd/source-controller -go 1.18 +go 1.25.0 replace github.com/fluxcd/source-controller/api => ./api -// A temporary fork of git2go was created to enable use -// of libgit2 without thread support to fix: -// fluxcd/image-automation-controller/#339. -// -// This can be removed once libgit2/git2go#918 is merged. -// -// The fork automatically releases new patches based on upstream: -// https://github.com/pjbgf/git2go/commit/d72e39cdc20f7fe014ba73072b01ba7b569e9253 -replace github.com/libgit2/git2go/v33 => github.com/pjbgf/git2go/v33 v33.0.9-nothread-check +// Replace digest lib to master to gather access to BLAKE3. 
+// xref: https://github.com/opencontainers/go-digest/pull/66 +replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be require ( - cloud.google.com/go/storage v1.23.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 - github.com/Masterminds/semver/v3 v3.1.1 - // github.com/ProtonMail/go-crypto is a fork of golang.org/x/crypto - // maintained by the ProtonMail team to continue to support the openpgp - // module, after the Go team decided to no longer maintain it. - // When in doubt (and not using openpgp), use /x/crypto. - github.com/ProtonMail/go-crypto v0.0.0-20220623141421-5afb4c282135 - github.com/cyphar/filepath-securejoin v0.2.3 - github.com/darkowlzz/controller-check v0.0.0-20220325122359-11f5827b7981 - github.com/distribution/distribution/v3 v3.0.0-20220729163034-26163d82560f - github.com/docker/cli v20.10.17+incompatible - github.com/docker/go-units v0.4.0 - github.com/elazarl/goproxy v0.0.0-20220529153421-8ea89ba92021 - github.com/fluxcd/gitkit v0.5.1 - github.com/fluxcd/pkg/apis/meta v0.14.2 - github.com/fluxcd/pkg/gittestserver v0.5.4 - github.com/fluxcd/pkg/gitutil v0.1.0 - github.com/fluxcd/pkg/helmtestserver v0.7.4 - github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/masktoken v0.0.1 - github.com/fluxcd/pkg/oci v0.3.0 - github.com/fluxcd/pkg/runtime v0.16.2 - github.com/fluxcd/pkg/ssh v0.5.0 - github.com/fluxcd/pkg/testserver v0.2.0 - github.com/fluxcd/pkg/untar v0.1.0 - 
github.com/fluxcd/pkg/version v0.1.0 - github.com/fluxcd/source-controller/api v0.26.1 - github.com/go-git/go-billy/v5 v5.3.1 - github.com/go-git/go-git/v5 v5.4.2 - github.com/go-logr/logr v1.2.3 - github.com/google/uuid v1.3.0 - github.com/libgit2/git2go/v33 v33.0.9 - github.com/minio/minio-go/v7 v7.0.31 - github.com/onsi/gomega v1.19.0 - github.com/otiai10/copy v1.7.0 + cloud.google.com/go/compute/metadata v0.8.0 + cloud.google.com/go/storage v1.56.1 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/cyphar/filepath-securejoin v0.4.1 + github.com/distribution/distribution/v3 v3.0.0 + github.com/docker/cli v28.4.0+incompatible + github.com/docker/go-units v0.5.0 + github.com/elazarl/goproxy v1.7.2 + github.com/fluxcd/cli-utils v0.36.0-flux.15 + github.com/fluxcd/pkg/apis/event v0.19.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + github.com/fluxcd/pkg/artifact v0.3.0 + github.com/fluxcd/pkg/auth v0.31.0 + github.com/fluxcd/pkg/cache v0.11.0 + github.com/fluxcd/pkg/git v0.36.0 + github.com/fluxcd/pkg/git/gogit v0.40.0 + github.com/fluxcd/pkg/gittestserver v0.20.0 + github.com/fluxcd/pkg/helmtestserver v0.30.0 + github.com/fluxcd/pkg/http/transport v0.7.0 + github.com/fluxcd/pkg/masktoken v0.8.0 + github.com/fluxcd/pkg/oci v0.56.0 + 
github.com/fluxcd/pkg/runtime v0.84.0 + github.com/fluxcd/pkg/sourceignore v0.14.0 + github.com/fluxcd/pkg/ssh v0.21.0 + github.com/fluxcd/pkg/tar v0.14.0 + github.com/fluxcd/pkg/testserver v0.13.0 + github.com/fluxcd/pkg/version v0.10.0 + github.com/fluxcd/source-controller/api v1.7.0 + github.com/foxcpp/go-mockdns v1.1.0 + github.com/go-git/go-billy/v5 v5.6.2 + github.com/go-git/go-git/v5 v5.16.2 + github.com/go-logr/logr v1.4.3 + github.com/google/go-containerregistry v0.20.6 + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 + github.com/google/uuid v1.6.0 + github.com/minio/minio-go/v7 v7.0.94 + github.com/notaryproject/notation-core-go v1.3.0 + github.com/notaryproject/notation-go v1.3.2 + github.com/onsi/gomega v1.38.2 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/ory/dockertest/v3 v3.12.0 + github.com/otiai10/copy v1.14.1 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 - github.com/prometheus/client_golang v1.12.2 - github.com/spf13/pflag v1.0.5 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/net v0.0.0-20220708220712-1185a9018129 - golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - google.golang.org/api v0.86.0 - gotest.tools v2.2.0+incompatible - helm.sh/helm/v3 v3.9.1 - k8s.io/api v0.24.2 - k8s.io/apimachinery v0.24.2 - k8s.io/client-go v0.24.2 - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 - sigs.k8s.io/cli-utils v0.31.2 - sigs.k8s.io/controller-runtime v0.11.2 - sigs.k8s.io/yaml v1.3.0 + github.com/prometheus/client_golang v1.23.0 + 
github.com/sigstore/cosign/v2 v2.5.2 + github.com/sigstore/sigstore v1.9.5 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/pflag v1.0.10 + golang.org/x/crypto v0.41.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.16.0 + google.golang.org/api v0.248.0 + helm.sh/helm/v3 v3.19.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/client-go v0.34.0 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + oras.land/oras-go/v2 v2.6.0 + sigs.k8s.io/controller-runtime v0.22.1 + sigs.k8s.io/yaml v1.6.0 ) require ( - github.com/google/go-containerregistry v0.10.0 - github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220712174516-ddd39fb9c385 -) - -// Fix CVE-2022-28948 -replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 - -// Fix CVE-2022-1996 (for v2, Go Modules incompatible) -replace github.com/emicklei/go-restful => github.com/emicklei/go-restful v2.16.0+incompatible - -// Fix CVE-2022-31030 -replace github.com/containerd/containerd => github.com/containerd/containerd v1.6.6 - -require ( - cloud.google.com/go v0.102.1 // indirect - cloud.google.com/go/compute v1.7.0 // indirect - cloud.google.com/go/iam v0.3.0 // indirect - github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.16.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect + 
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect - github.com/BurntSushi/toml v1.0.0 // indirect - github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect + github.com/Azure/go-autorest/autorest v0.11.30 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.2 // indirect + github.com/Azure/go-autorest/tracing v0.6.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + 
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/Masterminds/squirrel v1.5.3 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect - github.com/aws/aws-sdk-go v1.44.53 // indirect - github.com/aws/aws-sdk-go-v2 v1.16.4 // indirect - github.com/aws/aws-sdk-go-v2/config v1.15.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5 // indirect - 
github.com/aws/aws-sdk-go-v2/service/sso v1.11.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.6 // indirect - github.com/aws/smithy-go v1.11.2 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect + github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect + github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect + github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect + github.com/alibabacloud-go/debug v1.0.0 // indirect + github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect + github.com/alibabacloud-go/openapi-util v0.1.0 // indirect + github.com/alibabacloud-go/tea v1.2.1 // indirect + github.com/alibabacloud-go/tea-utils v1.4.5 // indirect + github.com/alibabacloud-go/tea-xml v1.1.3 // indirect + github.com/aliyun/credentials-go v1.3.2 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bshuster-repo/logrus-logstash-hook v1.0.2 // indirect - github.com/bugsnag/bugsnag-go v2.1.2+incompatible // indirect - github.com/bugsnag/panicwrap v1.3.4 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect - github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 // indirect - github.com/containerd/containerd v1.6.6 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + 
github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 // indirect + github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect + github.com/buildkite/agent/v3 v3.98.2 // indirect + github.com/buildkite/go-pipeline v0.13.3 // indirect + github.com/buildkite/interpolate v0.1.5 // indirect + github.com/buildkite/roko v1.3.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect + github.com/clbanning/mxj/v2 v2.7.0 // indirect + github.com/cloudflare/circl v1.6.1 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/continuity v0.4.5 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/coreos/go-oidc/v3 v3.15.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/digitorus/pkcs7 
v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.17+incompatible // indirect - github.com/docker/docker-credential-helpers v0.6.4 // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/emicklei/go-restful v2.15.0+incompatible // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-errors/errors v1.0.1 // indirect - github.com/go-git/gcfg v1.5.0 // 
indirect - github.com/go-gorp/gorp/v3 v3.0.2 // indirect - github.com/go-logr/zapr v1.2.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.21.1 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fluxcd/gitkit v0.6.0 // indirect + github.com/fluxcd/pkg/apis/acl v0.9.0 // indirect + github.com/fluxcd/pkg/lockedfile v0.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect + github.com/go-chi/chi v4.1.2+incompatible // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.10 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + 
github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-piv/piv-go/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/golang-jwt/jwt/v4 v4.4.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/gomodule/redigo v1.8.2 // indirect - github.com/google/btree v1.0.1 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.8 // indirect - github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220523143934-b17c48b086b7 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + 
github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f // indirect + github.com/google/go-github/v72 v72.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect - github.com/googleapis/go-type-adapters v1.0.0 // indirect - github.com/gorilla/handlers v1.5.1 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.1 // indirect - 
github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/in-toto/attestation v1.1.1 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.15.4 // indirect - github.com/klauspost/cpuid v1.3.1 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.10.6 // indirect + github.com/letsencrypt/boulder 
v0.0.0-20240620165639-de9c06129bec // indirect + github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/minio/md5-simd v1.1.0 // indirect - github.com/minio/sha256-simd v0.1.1 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect + github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/user v0.3.0 
// indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect + github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/notaryproject/notation-plugin-framework-go v1.0.0 // indirect + github.com/notaryproject/tspclient-go v1.0.0 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/oleiade/reflections v1.1.0 // indirect + github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a // indirect + github.com/opencontainers/runc v1.2.4 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/otiai10/mint v1.6.3 // indirect + github.com/pborman/uuid v1.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + 
github.com/pjbgf/sha1cd v0.4.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/rs/xid v1.2.1 // indirect - github.com/rubenv/sql-migrate v1.1.2 // indirect - github.com/russross/blackfriday v1.6.0 // indirect - github.com/sergi/go-diff v1.2.0 // indirect - github.com/shopspring/decimal v1.2.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - github.com/spf13/cobra v1.5.0 // indirect - github.com/stretchr/testify v1.7.4 // indirect - github.com/vbatts/tar-split v0.11.2 // indirect - github.com/xanzy/ssh-agent v0.3.1 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + 
github.com/rs/xid v1.6.0 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/segmentio/ksuid v1.0.4 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect + github.com/sigstore/protobuf-specs v0.4.3 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore-go v1.0.0 // indirect + github.com/sigstore/timestamp-authority v1.2.8 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/viper v1.20.1 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.1.1 // 
indirect + github.com/tinylib/msgp v1.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/tjfoc/gmsm v1.4.1 // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/veraison/go-cose v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect - github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 // indirect - github.com/yvasiyarov/gorelic v0.0.7 // indirect - github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 // indirect - go.opencensus.io v0.23.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92 // indirect - golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect - google.golang.org/grpc v1.47.0 // indirect - google.golang.org/protobuf 
v1.28.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.130.1 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + 
go.opentelemetry.io/proto/otlp v1.8.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // indirect - k8s.io/apiserver v0.24.2 // indirect - k8s.io/cli-runtime v0.24.2 // indirect - k8s.io/component-base v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6 // indirect - k8s.io/kubectl v0.24.2 // indirect - oras.land/oras-go v1.2.0 // indirect - sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect - sigs.k8s.io/kustomize/api v0.11.4 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/apiextensions-apiserver v0.34.0 // indirect + k8s.io/apiserver v0.34.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect + 
k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kubectl v0.34.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/release-utils v0.11.1 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) + +retract v0.32.0 // Refers to incorrect ./api version. diff --git a/go.sum b/go.sum index 84326655b..369cd9509 100644 --- a/go.sum +++ b/go.sum @@ -1,1157 +1,973 @@ -4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod 
h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod 
h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= -github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= +cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= 
+cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.56.1 h1:n6gy+yLnHn0hTwBFzNn8zJ1kqWfR91wzdM8hjRF4wP0= +cloud.google.com/go/storage v1.56.1/go.mod h1:C9xuCZgFl3buo2HZU/1FncgvvOgTAs/rnh4gF4lMg0s= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 h1:mRwydyTyhtRX2wXS3mqYWzR2qlv6KsmoKXmlz5vInjg= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg= +cuelang.org/go v0.12.1 h1:5I+zxmXim9MmiN2tqRapIqowQxABv2NKTgbOspud1Eo= +cuelang.org/go v0.12.1/go.mod h1:B4+kjvGGQnbkz+GuAv1dq/R308gTkp0sO28FdMrJ2Kw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= 
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 h1:ci6Yd6nysBRLEodoziB6ah1+YOzZbZk+NYneoA6q+6E= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 h1:ldKsKtEIblsgsr6mPwrd9yRntoX6uLz/K89wsldwx/k= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3/go.mod h1:MAm7bk0oDLmD8yIkvfbxPW04fxzphPyL+7GzwHxOp6Y= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 h1:figxyQZXzZQIcP3njhC68bYUiTw45J8/SsHaLW8Ax0M= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0/go.mod h1:TmlMW4W5OvXOmOyKNnor8nlMMiO1ctIyzmHme/VHsrA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= 
-github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= -github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= -github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod 
h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod 
h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= +github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= 
-github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= -github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= 
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20220623141421-5afb4c282135 h1:xDc/cFH/hwyr9KyWc0sm26lpsscqtfZBvU8NpRLHwJ0= -github.com/ProtonMail/go-crypto v0.0.0-20220623141421-5afb4c282135/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/purell v1.1.1/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= 
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= +github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= +github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= +github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= +github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU= +github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= +github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= +github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod 
h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= +github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= +github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= +github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= 
+github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= +github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= +github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= +github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= +github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= +github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= +github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod 
h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.53 h1:2MErE8gRyBLuE1fuH2Sqlj1xoN3S6/jXb0aO/A1jGfk= -github.com/aws/aws-sdk-go v1.44.53/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250= -github.com/aws/aws-sdk-go-v2 v1.16.4 h1:swQTEQUyJF/UkEA94/Ga55miiKFoXmm/Zd67XHgmjSg= -github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= -github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA= -github.com/aws/aws-sdk-go-v2/config v1.15.8 h1:Mk9aPT1JiPkhZO9PIP1w2ramuRw95d9w5YNOM3poTKk= -github.com/aws/aws-sdk-go-v2/config v1.15.8/go.mod h1:Z/guryqWzLw1T3pJbFA0/V3aVXw0sX5oH4lXXiD67aY= -github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc= 
-github.com/aws/aws-sdk-go-v2/credentials v1.12.3 h1:1kPx2lGjvopx7IMqKFmqmhqcuDZQ7pvq9xNXPP5c6qo= -github.com/aws/aws-sdk-go-v2/credentials v1.12.3/go.mod h1:p6/NGiaGKKM3ihOt/W08Ikz7/F95WhvgjA4x6MWKdS8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5 h1:YPxclBeE07HsLQE8vtjC8T2emcTjM9nzqsnDi2fv5UM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 h1:gsqHplNh1DaQunEKZISK56wlpbCg0yKxNVvGWCFuF1k= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 h1:PLFj+M2PgIDHG//hw3T0O0KLI4itVtAjtxrZx4AHPLg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 h1:j0VqrjtgsY1Bx27tD0ysay36/K4kFMWRp9K3ieO9nLU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= -github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 h1:W9vzPbvX7rOa/FacbQIDfnNrwxHkn5O+DdfmiIS4cHc= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5/go.mod h1:vk2+DbeZQFXznxJZSMnYrfnCHYxg4oT4Mdh59wSCkw4= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.4.1/go.mod h1:eD5Eo4drVP2FLTw0G+SMIPWNWvQRGGTtIZR2XeAagoA= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 
h1:Y8dpvUxU4JecYktR5oNFEW+HmUWlA1Oh7mboTVyQWLg= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5/go.mod h1:gW979HGZOrhGvwjAS6VRgav6M9AYH9Kbey6y3GfF/EA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5 h1:gRW1ZisKc93EWEORNJRvy/ZydF3o6xLSveJHdi1Oa0U= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.6 h1:AnTIdD439WgYNyVldYlpccGWY2EIXoUNmVzTDbFqCsg= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.6/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.6 h1:aYToU0/iazkMY67/BYLt3r6/LT/mUtarLAF5mGof1Kg= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ= -github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= -github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 h1:p2I85zYI9z5/c/3Q0LiO3RtNXcmXHTtJfml/hV16zNg= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04/go.mod h1:Z+bXnIbhKJYSvxNwsNnwde7pDKxuqlEZCbUBoTwAqf0= -github.com/benbjohnson/clock v1.0.3/go.mod 
h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= +github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 h1:lcwFjRx3C/hBxJzoWkD6DIG2jeB+mzLmFVBFVOadxxE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1/go.mod h1:qt9OL5kXqWoSub4QAkOF74mS3M2zOTNxMODqgwEUjt8= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 h1:EfatDVSMFxaS5TiR0C0zssQU1Nm+rGx3VbUGIH1y274= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2/go.mod h1:oRy1IEgzXtOkEk4B/J7HZbXUC258drDLtkmc++lN7IA= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 h1:Txq5jxY/ao+2Vx/kX9+65WTqkzCnxSlXnwIj+Cr/fng= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1/go.mod h1:+hYFg3laewH0YCfJRv+o5R3bradDKmFIm/uaiaD1U7U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0 h1:2jKyib9msVrAVn+lngwlSplG13RpUZmzVte2yDao5nc= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0/go.mod h1:RyhzxkWGcfixlkieewzpO3D4P4fTMxhIDqDZWsh0u/4= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 h1:50sS0RWhGpW/yZx2KcDNEb1u1MANv5BMEkJgcieEDTA= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1/go.mod h1:ErZOtbzuHabipRTDTor0inoRlYwbsV1ovwSxjGs/uJo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bkielbasa/cyclop v1.2.0/go.mod 
h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= -github.com/bshuster-repo/logrus-logstash-hook v1.0.2/go.mod h1:HgYntJprnHSPaF9VPPPLP1L5S1vMWxRfa1J+vzDrDTw= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v2.1.2+incompatible h1:E7dor84qzwUO8KdCM68CZwq9QOSR7HXlLx3Wj5vui2s= -github.com/bugsnag/bugsnag-go v2.1.2+incompatible/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3pU= -github.com/bugsnag/panicwrap v1.3.4/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/buildkite/agent/v3 v3.98.2 h1:VOOxv8XD8HVCtEvtRPQhvB6k2Gorha2gN1wGh94gYAA= +github.com/buildkite/agent/v3 v3.98.2/go.mod h1:+zCvvo/OlOwfs+AH3QvSn37H3cBXP3Fe18eoSbqUvnY= +github.com/buildkite/go-pipeline v0.13.3 h1:llI7sAdZ7sqYE7r8ePlmDADRhJ1K0Kua2+gv74Z9+Es= +github.com/buildkite/go-pipeline v0.13.3/go.mod h1:1uC2XdHkTV1G5jYv9K8omERIwrsYbBruBrPx1Zu1uFw= +github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= +github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko v1.3.1 h1:t7K30ceLLYn6k7hQP4oq1c7dVlhgD5nRcuSRDEEnY1s= +github.com/buildkite/roko v1.3.1/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod 
h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 h1:9Qh4lJ/KMr5iS1zfZ8I97+3MDpiKjl+0lZVUNBhdvRs= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08/go.mod h1:MAuu1uDJNOS3T3ui0qmKdPUwm59+bO19BbTph2wZafE= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= +github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= +github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod 
h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0= -github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= -github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk= -github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= 
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= -github.com/darkowlzz/controller-check v0.0.0-20220325122359-11f5827b7981 h1:4GBOSRDmbX+zPT0vV67ay6036Eqz1rh6kZGydsfyh3o= -github.com/darkowlzz/controller-check v0.0.0-20220325122359-11f5827b7981/go.mod h1:haYO9UW76kUUKpIBbv3ydaU5wZ/7r0yqp61PGzVRSYU= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= 
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg= +github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 
+github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA= +github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/distribution/v3 v3.0.0-20220729163034-26163d82560f h1:3NCYdjXycNd/Xn/iICZzmxkiDX1e1cjTHjbMAz+wRVk= -github.com/distribution/distribution/v3 v3.0.0-20220729163034-26163d82560f/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/docker/cli v20.10.17+incompatible 
h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli 
v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20220529153421-8ea89ba92021 h1:EbF0UihnxWRcIMOwoVtqnAylsqcjzqpSvMdjF2Ud4rA= -github.com/elazarl/goproxy v0.0.0-20220529153421-8ea89ba92021/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= -github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= 
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= +github.com/emicklei/proto v1.13.4/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= 
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/fluxcd/gitkit v0.5.1 h1:kmpXs0g+eNuoq9CUzGppGadVF+c7j4n2kPYE/bvkMD0= -github.com/fluxcd/gitkit v0.5.1/go.mod h1:svOHuKi0fO9HoawdK4HfHAJJseZDHHjk7I3ihnCIqNo= -github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= -github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.14.2 h1:/Hf7I/Vz01vv3m7Qx7DtQvrzAL1oVt0MJcLb/I1Y1HE= -github.com/fluxcd/pkg/apis/meta v0.14.2/go.mod h1:ijZ61VG/8T3U17gj0aFL3fdtZL+mulD6V8VrLLUCAgM= -github.com/fluxcd/pkg/gittestserver v0.5.4 h1:qRPtyjtJ98lDL5w5gnOjTjjBLWWq7+gknrVqC77mwuU= -github.com/fluxcd/pkg/gittestserver v0.5.4/go.mod h1:h84tnNBKIlOaZWS2HeQNkCH5WKHD6sUsjwIRhZunX0Q= -github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= -github.com/fluxcd/pkg/gitutil v0.1.0/go.mod h1:Ybz50Ck5gkcnvF0TagaMwtlRy3X3wXuiri1HVsK5id4= 
-github.com/fluxcd/pkg/helmtestserver v0.7.4 h1:/Xj2+XLz7wr38MI3uPYvVAsZB9wQOq6rp3Drr3EOfhg= -github.com/fluxcd/pkg/helmtestserver v0.7.4/go.mod h1:aL5V4o8wUOMqeHMfjbVHS057E3ejzHMRVMqEbsK9FUQ= -github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= -github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/masktoken v0.0.1 h1:egWR/ibTzf4L3PxE8TauKO1srD1Ye/aalgQRQuKKRdU= -github.com/fluxcd/pkg/masktoken v0.0.1/go.mod h1:sQmMtX4s5RwdGlByJazzNasWFFgBdmtNcgeZcGBI72Y= -github.com/fluxcd/pkg/oci v0.3.0 h1:GFn6JZeg5fV2K4vsQ0s5lJFid6qrpA4RybLXL+7qUbQ= -github.com/fluxcd/pkg/oci v0.3.0/go.mod h1:c1pj9E/G5927gSa6ooACAyZe+HwjgmPk9johL7oXDHw= -github.com/fluxcd/pkg/runtime v0.16.2 h1:CexfMmJK+r12sHTvKWyAax0pcPomjd6VnaHXcxjUrRY= -github.com/fluxcd/pkg/runtime v0.16.2/go.mod h1:OHSKsrO+T+Ym8WZRS2oidrnauWRARuE2nfm8ewevm7M= -github.com/fluxcd/pkg/ssh v0.5.0 h1:jE9F2XvUXC2mgseeXMATvO014fLqdB30/VzlPLKsk20= -github.com/fluxcd/pkg/ssh v0.5.0/go.mod h1:KGgOUOy1uI6RC6+qxIBLvP1AeOOs/nLB25Ca6TZMIXE= -github.com/fluxcd/pkg/testserver v0.2.0 h1:Mj0TapmKaywI6Fi5wvt1LAZpakUHmtzWQpJNKQ0Krt4= -github.com/fluxcd/pkg/testserver v0.2.0/go.mod h1:bgjjydkXsZTeFzjz9Cr4heGANr41uTB1Aj1Q5qzuYVk= -github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= -github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= -github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7ZEAQ= -github.com/fluxcd/pkg/version v0.1.0/go.mod h1:V7Z/w8dxLQzv0FHqa5ox5TeyOd2zOd49EeuWFgnwyj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fluxcd/cli-utils v0.36.0-flux.15 h1:Et5QLnIpRjj+oZtM9gEybkAaoNsjysHq0y1253Ai94Y= +github.com/fluxcd/cli-utils v0.36.0-flux.15/go.mod h1:AqRUmWIfNE7cdL6NWSGF0bAlypGs+9x5UQ2qOtlEzv4= +github.com/fluxcd/gitkit v0.6.0 h1:iNg5LTx6ePo+Pl0ZwqHTAkhbUHxGVSY3YCxCdw7VIFg= +github.com/fluxcd/gitkit 
v0.6.0/go.mod h1:svOHuKi0fO9HoawdK4HfHAJJseZDHHjk7I3ihnCIqNo= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/event v0.19.0 h1:ZJU2voontkzp5rNYA4JMOu40S4tRcrWi4Do59EnyFwg= +github.com/fluxcd/pkg/apis/event v0.19.0/go.mod h1:deuIyUb6lh+Z1Ccvwwxhm1wNM3kpSo+vF1IgRnpaZfQ= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fluxcd/pkg/artifact v0.3.0 h1:Mxescx4HOaXJDYhdgecmZwGdnrgPFu/N6sJY9GuTpuo= +github.com/fluxcd/pkg/artifact v0.3.0/go.mod h1:CFtfSBcma+WBkIhjxleaXoCwIjccdkunLO7gv/59xe8= +github.com/fluxcd/pkg/auth v0.31.0 h1:PIwSn7Onq74cGDTocZJZ6P47FxGvbT8NIW7UKFm51rU= +github.com/fluxcd/pkg/auth v0.31.0/go.mod h1:Qxc5OKRMLBwtxO0nf2stm4ZkgzXcrvF6x6BSquiAMW8= +github.com/fluxcd/pkg/cache v0.11.0 h1:fsE8S+una21fSNw4MDXGUIf0Gf1J+pqa4RbsVKf2aTI= +github.com/fluxcd/pkg/cache v0.11.0/go.mod h1:2RTIU6PsJniHmfnllQWFEo7fa5V8KQlnMgn4o0sme40= +github.com/fluxcd/pkg/git v0.36.0 h1:oakFKxTX5yiLcFzCS1SaV+mMXaODaF1Ic6/oCLfIe7I= +github.com/fluxcd/pkg/git v0.36.0/go.mod h1:4TgfjcoM3B2sGsO5VbfBSwJQYzNCONGihcTOW8P3Jxw= +github.com/fluxcd/pkg/git/gogit v0.40.0 h1:VCsHC1440jMk1wAGWCwkgU2nDUBOPeYbCk6/OtvbY7Y= +github.com/fluxcd/pkg/git/gogit v0.40.0/go.mod h1:nQVyfa+rYSeVQiwVH5f/C4o1sf2MtMFjMlt3VSkC+P0= +github.com/fluxcd/pkg/gittestserver v0.20.0 h1:xhzLV89mta23ZvTK0cpDCR6ni6vp5Di+9b4v3YBziMQ= +github.com/fluxcd/pkg/gittestserver v0.20.0/go.mod h1:vGmM9eDJk56gx+osTcSHeScefnAaL4czR+rsNsvh0nw= 
+github.com/fluxcd/pkg/helmtestserver v0.30.0 h1:gEJ6kHei8/SB8J/YemeWaypCxRtfmoejqMxtEOlZRgI= +github.com/fluxcd/pkg/helmtestserver v0.30.0/go.mod h1:xXOkfz7/4z8fz9GJYrYVB9we7bvtmdKKedBeGPHVlhs= +github.com/fluxcd/pkg/http/transport v0.7.0 h1:LbA0qzh1lT6GncWLkN/BjbSMrN8bdFtaa2TqxiIdyzs= +github.com/fluxcd/pkg/http/transport v0.7.0/go.mod h1:G3ptGZKlY0PJZsvWCwzV9vKQ90yfP/mKT2/ZdAud9LE= +github.com/fluxcd/pkg/lockedfile v0.7.0 h1:tmzW2GeMGuJMiCcVloXVd1vKZ92anm9WGkRgOBpWfRk= +github.com/fluxcd/pkg/lockedfile v0.7.0/go.mod h1:AzCV/h1N3hi/KtUDUCUgS8hl1+a1y+I6pmRo25dxdK0= +github.com/fluxcd/pkg/masktoken v0.8.0 h1:Dm5xIVNbg0s6zNttjDvimaG38bKsXwxBVo5b+D7ThVU= +github.com/fluxcd/pkg/masktoken v0.8.0/go.mod h1:Gc73ALOqIe+5Gj2V3JggMNiYcBiZ9bNNDYBE9R5XTTg= +github.com/fluxcd/pkg/oci v0.56.0 h1:t/jnHpizC+j7Gugw8y14HdfHnhLIgmxR3yNdArghUrM= +github.com/fluxcd/pkg/oci v0.56.0/go.mod h1:WZxMYYWfugc4rtnq2zHUIHxH0+e6IRhP9EDq+mW/Z2w= +github.com/fluxcd/pkg/runtime v0.84.0 h1:3M+egIQwQU9YYjKQkczyawG+9RUOkGtkDMQlePnEeTM= +github.com/fluxcd/pkg/runtime v0.84.0/go.mod h1:Wt9mUzQgMPQMu2D/wKl5pG4zh5vu/tfF5wq9pPobxOQ= +github.com/fluxcd/pkg/sourceignore v0.14.0 h1:ZiZzbXtXb/Qp7I7JCStsxOlX8ri8rWwCvmvIrJ0UzQQ= +github.com/fluxcd/pkg/sourceignore v0.14.0/go.mod h1:E3zKvyTyB+oQKqm/2I/jS6Rrt3B7fNuig/4bY2vi3bg= +github.com/fluxcd/pkg/ssh v0.21.0 h1:ZmyF0n9je0cTTkOpvFVgIhmdx9qtswnVE60TK4IzJh0= +github.com/fluxcd/pkg/ssh v0.21.0/go.mod h1:nX+gvJOmjf0E7lxq5mKKzDIdPEL2jOUQZbkBMS+mDtk= +github.com/fluxcd/pkg/tar v0.14.0 h1:9Gku8FIvPt2bixKldZnzXJ/t+7SloxePlzyVGOK8GVQ= +github.com/fluxcd/pkg/tar v0.14.0/go.mod h1:+rOWYk93qLEJ8WwmkvJOkB8i0dna1mrwJFybE8i9Udo= +github.com/fluxcd/pkg/testserver v0.13.0 
h1:xEpBcEYtD7bwvZ+i0ZmChxKkDo/wfQEV3xmnzVybSSg= +github.com/fluxcd/pkg/testserver v0.13.0/go.mod h1:akRYv3FLQUsme15na9ihECRG6hBuqni4XEY9W8kzs8E= +github.com/fluxcd/pkg/version v0.10.0 h1:WETlCRbfbocsDItkCCeh/4x4zQkZ5i/lUe7P7VaQBrI= +github.com/fluxcd/pkg/version v0.10.0/go.mod h1:dgmjEq4ykvBnqK1oVXM+hcXx3kAY/b4uZDYUn8XnHjk= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod 
h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk= +github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= 
+github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU= +github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod 
h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-redis/redis 
v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 
h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-piv/piv-go/v2 v2.3.0 h1:kKkrYlgLQTMPA6BiSL25A7/x4CEh2YCG7rtb/aTkx+g= +github.com/go-piv/piv-go/v2 v2.3.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.2 
h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= 
-github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= -github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= -github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gofrs/uuid 
v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= -github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= 
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep 
v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 
h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.10.0 h1:qd/fv2nQajGZJenaNcdaghlwSPjQ0NphN9hzArr2WWg= -github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220712174516-ddd39fb9c385 h1:5YpLgrjMUhTXx6aQOHs7CmuleIwp0mLB8UcWH0IsSD8= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220712174516-ddd39fb9c385/go.mod h1:FUBeAeOrhHeM8/cPyFCp8WvdekKo05mh6GKvE60SC8I= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220523143934-b17c48b086b7 h1:b3NHmEfe3oGfuPaW8H5r92NWSK8bL50UVnxRWS+YQOE= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220523143934-b17c48b086b7/go.mod h1:hCxWNnETMVVnSa7iue+awKrZS87UPoqgKF8RNOQomPA= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 h1:1d9SJvpHXjFuYBHAS5576memil93kLpgBZ5OjdtvW4I= 
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039/go.mod h1:AlUTqI/YtH9ckkhLo4ClTAccEOZz8EaLVxqrfv56OFg= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f h1:GJRzEBoJv/A/E7JbTekq1Q0jFtAfY7TIxUFAK89Mmic= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f/go.mod h1:ZT74/OE6eosKneM9/LQItNxIMBV6CI5S46EXAnvkTBI= +github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= +github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 
h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil 
v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 
h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod 
h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod 
h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4= +github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= 
-github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= +github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap 
v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= -github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= 
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= -github.com/karrick/godirwalk v1.16.1/go.mod 
h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ= -github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= -github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= 
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq 
v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= -github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= 
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 
h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.31 h1:zsJ3qPDeU3bC5UMVi9HJ4ED0lyEzrNd3iQguglZS5FE= -github.com/minio/minio-go/v7 v7.0.31/go.mod h1:/sjRKkKIA75CKh1iu8E3qBy7ktBmCCDGII0zbXGwbUk= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= 
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= +github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= +github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= 
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1 
h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= 
+github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/notaryproject/notation-core-go v1.3.0 
h1:mWJaw1QBpBxpjLSiKOjzbZvB+xh2Abzk14FHWQ+9Kfs= +github.com/notaryproject/notation-core-go v1.3.0/go.mod h1:hzvEOit5lXfNATGNBT8UQRx2J6Fiw/dq/78TQL8aE64= +github.com/notaryproject/notation-go v1.3.2 h1:4223iLXOHhEV7ZPzIUJEwwMkhlgzoYFCsMJvSH1Chb8= +github.com/notaryproject/notation-go v1.3.2/go.mod h1:/1kuq5WuLF6Gaer5re0Z6HlkQRlKYO4EbWWT/L7J1Uw= +github.com/notaryproject/notation-plugin-framework-go v1.0.0 h1:6Qzr7DGXoCgXEQN+1gTZWuJAZvxh3p8Lryjn5FaLzi4= +github.com/notaryproject/notation-plugin-framework-go v1.0.0/go.mod h1:RqWSrTOtEASCrGOEffq0n8pSg2KOgKYiWqFWczRSics= +github.com/notaryproject/tspclient-go v1.0.0 h1:AwQ4x0gX8IHnyiZB1tggpn5NFqHpTEm1SDX8YNv4Dg4= +github.com/notaryproject/tspclient-go v1.0.0/go.mod h1:LGyA/6Kwd2FlM0uk8Vc5il3j0CddbWSHBj/4kxQDbjs= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= -github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= -github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/otiai10/mint v1.3.3 h1:7JgpsBaN0uMkyju4tbYHu0mnM55hNKVYLsXmwr15NQI= 
-github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/open-policy-agent/opa v1.5.1 h1:LTxxBJusMVjfs67W4FoRcnMfXADIGFMzpqnfk6D08Cg= +github.com/open-policy-agent/opa v1.5.1/go.mod h1:bYbS7u+uhTI+cxHQIpzvr5hxX0hV7urWtY+38ZtjMgk= +github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be h1:f2PlhC9pm5sqpBZFvnAoKj+KzXRzbjFMA+TqXfJdgho= +github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a h1:IAncDmJeD90l6+YR1Gf6r0HrmnRmOatzPfUpMS80ZTI= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.4 h1:yWFgLkghp71D76Fa0l349yAl5g4Gse7DPYNlvkQ9Eiw= +github.com/opencontainers/runc 
v1.2.4/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= +github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pjbgf/git2go/v33 v33.0.9-nothread-check 
h1:gSK7FaLECIM3VSuBOAsVZQtWd+51iTB5lv9RyxhOYMk= -github.com/pjbgf/git2go/v33 v33.0.9-nothread-check/go.mod h1:KdpqkU+6+++4oHna/MIOgx4GCQ92IPCdpVRMRI80J+4= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= +github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod 
h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rubenv/sql-migrate v1.1.2 h1:9M6oj4e//owVVHYrFISmY9LBRw6gzkCNmD9MV36tZeQ= -github.com/rubenv/sql-migrate v1.1.2/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d h1:HWfigq7lB31IeJL8iy7jkUmU/PG1Sr8jVGhS749dbUA= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 
v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= -github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= 
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sigstore/cosign/v2 v2.5.2 
h1:i5Dw7M7W9OcWgyiknJB8vNx/07KweninBDxRoHPxqHE= +github.com/sigstore/cosign/v2 v2.5.2/go.mod h1:CYlcgkPQJZ5pvWlbl7mOfO/Q1S1N7r4tpdYCtFwhXco= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= +github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= +github.com/sigstore/sigstore-go v1.0.0 h1:4N07S2zLxf09nTRwaPKyAxbKzpM8WJYUS8lWWaYxneU= +github.com/sigstore/sigstore-go v1.0.0/go.mod h1:UYsZ/XHE4eltv1o1Lu+n6poW1Z5to3f0+emvfXNxIN8= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 h1:qp2VFyKuFQvTGmZwk5Q7m5nE4NwnF9tHwkyz0gtWAck= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5/go.mod h1:DKlQjjr+GsWljEYPycI0Sf8URLCk4EbGA9qYjF47j4g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 h1:CRZcdYn5AOptStsLRAAACudAVmb1qUbhMlzrvm7ju3o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5/go.mod h1:b9rFfITq2fp1M3oJmq6lFFhSrAz5vOEJH1qzbMsZWN4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 h1:7U0GsO0UGG1PdtgS6wBkRC0sMgq7BRVaFlPRwN4m1Qg= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5/go.mod h1:/2qrI0nnCy/DTIPOMFaZlFnNPWEn5UeS70P37XEM88o= 
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 h1:S2ukEfN1orLKw2wEQIUHDDlzk0YcylhcheeZ5TGk8LI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5/go.mod h1:m7sQxVJmDa+rsmS1m6biQxaLX83pzNS7ThUEyjOqkCU= +github.com/sigstore/timestamp-authority v1.2.8 h1:BEV3fkphwU4zBp3allFAhCqQb99HkiyCXB853RIwuEE= +github.com/sigstore/timestamp-authority v1.2.8/go.mod h1:G2/0hAZmLPnevEwT1S9IvtNHUm9Ktzvso6xuRhl94ZY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= 
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod 
h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= -github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod 
h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
-github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1159,855 +975,445 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM= -github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= -github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot 
v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 
-github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= -github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= -github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo= -github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 
h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.1.1 h1:OWcoHItwsGO+7m0wLa7FDWPR4oB1cj0zOr1kosE4G+I= +github.com/theupdateframework/go-tuf/v2 v2.1.1/go.mod h1:V675cQGhZONR0OGQ8r1feO0uwtsTBYPDWHzAAPn5rjE= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= 
+github.com/tink-crypto/tink-go/v2 v2.4.0 h1:8VPZeZI4EeZ8P/vB6SIkhlStrJfivTJn+cQ4dtyHNh0= +github.com/tink-crypto/tink-go/v2 v2.4.0/go.mod h1:l//evrF2Y3MjdbpNDNGnKgCpo5zSmvUvnQ4MU+yE2sw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.26 h1:REqqFkO8+SOEgZHR/eHScjjVjGS8Nk3RMO/juiTobN4= +github.com/vektah/gqlparser/v2 v2.5.26/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= +github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs 
v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.30/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE= -github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod 
h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= 
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr 
v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= +github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +gitlab.com/gitlab-org/api/client-go v0.130.1 h1:1xF5C5Zq3sFeNg3PzS2z63oqrxifne3n/OnbI7nptRc= +gitlab.com/gitlab-org/api/client-go v0.130.1/go.mod 
h1:ZhSxLAWadqP6J9lMh40IAZOlOxBLPRh7yFOXR/bMJWM= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= 
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 
h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.step.sm/crypto v0.66.0 h1:9TW6BEguOtcS9NIjja9bDQ+j8OjhenU/F6lJfHjbXNU= +go.step.sm/crypto v0.66.0/go.mod 
h1:anqGyvO/Px05D1mznHq4/a9wwP1I1DmMZvk+TWX5Dzo= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0= -golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod 
h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92 h1:oVlhw3Oe+1reYsE2Nqu19PDJfLzwdU3QUUrG86rLK68= -golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod 
h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod 
h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod 
h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod 
h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U= -google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y= +google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 
-gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= -helm.sh/helm/v3 v3.9.1 h1:i1ChBu5ZB01kMaN2Y4KaC7J6viT58L2pHXWrXJ0Ny58= -helm.sh/helm/v3 v3.9.1/go.mod h1:y/dJc/0Lzcn40jgd85KQXnufhFF7sr4v6L/vYMLRaRM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= 
-k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= -k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.24.2 h1:orxipm5elPJSkkFNlwH9ClqaKEDJJA3yR2cAAlCnyj4= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/cli-runtime v0.24.2 h1:KxY6tSgPGsahA6c1/dmR3uF5jOxXPx2QQY6C5ZrLmtE= -k8s.io/cli-runtime v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= 
-k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6 h1:nBQrWPlrNIiw0BsX6a6MKr1itkm0ZS0Nl97kNLitFfI= -k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= -k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= -k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= -k8s.io/metrics v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -oras.land/oras-go v1.2.0 h1:yoKosVIbsPoFMqAIFHTnrmOuafHal+J/r+I5bdbVWu4= -oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/cli-utils v0.31.2 h1:0yX0GPyvbc+yAEWwWlhgHlPF7JtvlLco6HjolSWewt4= -sigs.k8s.io/cli-utils v0.31.2/go.mod h1:g/zB9hJ5eUN7zIEBIxrO0CwhXU4YISJ+BkLJzvWwlEs= -sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA= -sigs.k8s.io/controller-runtime v0.11.2/go.mod 
h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= -sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod 
h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 
h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/release-utils v0.11.1 h1:hzvXGpHgHJfLOJB6TRuu14bzWc3XEglHmXHJqwClSZE= +sigs.k8s.io/release-utils v0.11.1/go.mod h1:ybR2V/uQAOGxYfzYtBenSYeXWkBGNP2qnEiX77ACtpc= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/hack/api-docs/config.json b/hack/api-docs/config.json index 26c4082f8..ea8b2b9a5 100644 --- a/hack/api-docs/config.json +++ b/hack/api-docs/config.json @@ -9,11 +9,11 @@ "externalPackages": [ { "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" }, { "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Condition$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition" + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition" }, { "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", @@ -21,11 +21,15 @@ }, { 
"typeMatchPrefix": "^github.com/fluxcd/pkg/apis/meta", - "docsURLTemplate": "https://godoc.org/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}" + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}" }, { "typeMatchPrefix": "^github.com/fluxcd/pkg/apis/acl", - "docsURLTemplate": "https://godoc.org/github.com/fluxcd/pkg/apis/acl#{{ .TypeIdentifier }}" + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#{{ .TypeIdentifier }}" + }, + { + "typeMatchPrefix": "^github.com/fluxcd/source-controller/api/v1", + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#{{ .TypeIdentifier }}" } ], "typeDisplayNamePrefixOverrides": { diff --git a/hack/api-docs/template/pkg.tpl b/hack/api-docs/template/pkg.tpl index f2b3140f2..0cb681a67 100644 --- a/hack/api-docs/template/pkg.tpl +++ b/hack/api-docs/template/pkg.tpl @@ -1,5 +1,10 @@ {{ define "packages" }} -

    Source API reference

    +

    Source API reference + {{- with (index .packages 0) -}} + {{ with (index .GoPackages 0 ) -}} + {{ printf " %s" .Name -}} + {{ end -}} + {{ end }}

    {{ with .packages}}

    Packages:

    diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 74dbebc30..f186b9dd3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2022 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/hack/ci/e2e.sh b/hack/ci/e2e.sh index 3d7dcb5bd..b00eda00c 100755 --- a/hack/ci/e2e.sh +++ b/hack/ci/e2e.sh @@ -6,16 +6,14 @@ CREATE_CLUSTER="${CREATE_CLUSTER:-true}" KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}" LOAD_IMG_INTO_KIND="${LOAD_IMG_INTO_KIND:-true}" BUILD_PLATFORM="${BUILD_PLATFORM:-linux/amd64}" -MINIO_HELM_VER="${MINIO_HELM_VER:-v6.3.1}" -# Older tags do not bundle multiple architectures. Newer tags are 5-6 times larger. -MINIO_TAG="${MINIO_TAG:-RELEASE.2020-09-17T04-49-20Z}" +MINIO_HELM_VER="${MINIO_HELM_VER:-12.10.3}" IMG=test/source-controller TAG=latest -MC_RELEASE=mc.RELEASE.2021-12-16T23-38-39Z -MC_AMD64_SHA256=d14302bbdaa180a073c1627ff9fbf55243221e33d47e32df61a950f635810978 -MC_ARM64_SHA256=00791995bf8d102e3159e23b3af2f5e6f4c784fafd88c60161dcf3f0169aa217 +MC_RELEASE=mc.RELEASE.2023-11-20T16-30-59Z +MC_AMD64_SHA256=fdd901a5169d676f32483f9a2de977b7ff3a4fe83e254dcbc35e7a1545591565 +MC_ARM64_SHA256=09816180f560875d344dc436ed4ec1348b3ff0c836ae9cf0415fef602489cc11 ROOT_DIR="$(git rev-parse --show-toplevel)" BUILD_DIR="${ROOT_DIR}/build" @@ -75,7 +73,6 @@ kubectl -n source-system rollout status deploy/source-controller --timeout=1m kubectl -n source-system wait gitrepository/gitrepository-sample --for=condition=ready --timeout=1m kubectl -n source-system wait ocirepository/ocirepository-sample --for=condition=ready --timeout=1m kubectl -n source-system wait helmrepository/helmrepository-sample --for=condition=ready --timeout=1m -kubectl -n source-system wait helmrepository/helmrepository-sample-oci --for=condition=ready --timeout=1m kubectl -n source-system wait 
helmchart/helmchart-sample --for=condition=ready --timeout=1m kubectl -n source-system wait helmchart/helmchart-sample-oci --for=condition=ready --timeout=1m kubectl -n source-system delete -f "${ROOT_DIR}/config/samples" @@ -88,15 +85,14 @@ kubectl -n source-system delete -f "${ROOT_DIR}/config/testdata/helmchart-values echo "Setup Minio" kubectl create ns minio -helm repo add minio https://helm.min.io/ --force-update -helm upgrade minio minio/minio --wait -i \ +helm upgrade minio oci://registry-1.docker.io/bitnamicharts/minio --wait -i \ --version "${MINIO_HELM_VER}" \ + --timeout 10m0s \ --namespace minio \ - --set accessKey=myaccesskey \ - --set secretKey=mysecretkey \ + --set auth.rootUser=myaccesskey \ + --set auth.rootPassword=mysecretkey \ --set resources.requests.memory=128Mi \ - --set persistence.enable=false \ - --set image.tag="${MINIO_TAG}" + --set persistence.enable=false kubectl -n minio port-forward svc/minio 9000:9000 &>/dev/null & sleep 2 @@ -133,7 +129,7 @@ kubectl -n source-system wait bucket/podinfo --for=condition=ready --timeout=1m echo "Run HelmChart from Bucket tests" "${BUILD_DIR}/mc" mb minio/charts -"${BUILD_DIR}/mc" mirror "${ROOT_DIR}/controllers/testdata/charts/helmchart/" minio/charts/helmchart +"${BUILD_DIR}/mc" mirror "${ROOT_DIR}/internal/controller/testdata/charts/helmchart/" minio/charts/helmchart kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-bucket/source.yaml" kubectl -n source-system wait bucket/charts --for=condition=ready --timeout=1m @@ -141,27 +137,27 @@ kubectl -n source-system wait helmchart/helmchart-bucket --for=condition=ready - echo "Run large Git repo tests" kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/git/large-repo.yaml" -kubectl -n source-system wait gitrepository/large-repo-go-git --for=condition=ready --timeout=2m15s -kubectl -n source-system wait gitrepository/large-repo-libgit2 --for=condition=ready --timeout=2m15s +kubectl -n source-system wait 
gitrepository/large-repo --for=condition=ready --timeout=2m15s +echo "Run HelmChart from OCI registry tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/source.yaml" +kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=1m +kubectl -n source-system wait helmchart/podinfo-keyless --for=condition=ready --timeout=1m -# Test experimental libgit2 transport. Any tests against the default transport must -# either run before this, or patch the deployment again to disable this, as once enabled -# only the managed transport will be used. -kubectl -n source-system patch deployment source-controller \ - --patch '{"spec": {"template": {"spec": {"containers": [{"name": "manager","env": [{"name": "EXPERIMENTAL_GIT_TRANSPORT", "value": "true"}]}]}}}}' - -# wait until the patch took effect and the new source-controller is running -sleep 20s - -kubectl -n source-system wait --for=condition=ready --timeout=1m -l app=source-controller pod +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/notation.yaml" +curl -sSLo notation.crt https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/notation.crt +curl -sSLo trustpolicy.json https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/trustpolicy.json +kubectl -n source-system create secret generic notation-config --from-file=notation.crt --from-file=trustpolicy.json --dry-run=client -o yaml | kubectl apply -f - +kubectl -n source-system wait helmchart/podinfo-notation --for=condition=ready --timeout=1m -echo "Re-run large libgit2 repo test with managed transport" -kubectl -n source-system wait gitrepository/large-repo-libgit2 --for=condition=ready --timeout=2m15s -kubectl -n source-system exec deploy/source-controller -- printenv | grep EXPERIMENTAL_GIT_TRANSPORT=true +echo "Run OCIRepository verify tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-key.yaml" 
+kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-keyless.yaml" +curl -sSLo cosign.pub https://raw.githubusercontent.com/stefanprodan/podinfo/master/.cosign/cosign.pub +kubectl -n source-system create secret generic cosign-key --from-file=cosign.pub --dry-run=client -o yaml | kubectl apply -f - +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-key --for=condition=ready --timeout=1m +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-keyless --for=condition=ready --timeout=1m -echo "Run HelmChart from OCI registry tests" -kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/source.yaml" -kubectl -n source-system wait helmrepository/podinfo --for=condition=ready --timeout=1m -kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=1m +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-notation.yaml" +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-notation --for=condition=ready --timeout=1m diff --git a/hack/install-libraries.sh b/hack/install-libraries.sh deleted file mode 100755 index afec8bc97..000000000 --- a/hack/install-libraries.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env bash - -set -euxo pipefail - -IMG="${IMG:-}" -TAG="${TAG:-}" -IMG_TAG="${IMG}:${TAG}" -DOWNLOAD_URL="https://github.com/fluxcd/golang-with-libgit2/releases/download/${TAG}" - -TMP_DIR=$(mktemp -d) - -function cleanup(){ - rm -rf "${TMP_DIR}" -} -trap cleanup EXIT - -fatal() { - echo '[ERROR] ' "$@" >&2 - exit 1 -} - -download() { - [[ $# -eq 2 ]] || fatal 'download needs exactly 2 arguments' - - curl -o "$1" -sfL "$2" - - [[ $? 
-eq 0 ]] || fatal 'Download failed' -} - -download_files() { - [[ $# -eq 1 ]] || fatal 'download_files needs exactly 1 arguments' - - FILE_NAMES="checksums.txt checksums.txt.sig checksums.txt.pem $1" - - for FILE_NAME in ${FILE_NAMES}; do - download "${TMP_DIR}/${FILE_NAME}" "${DOWNLOAD_URL}/${FILE_NAME}" - done -} - -cosign_verify(){ - [[ $# -eq 3 ]] || fatal 'cosign_verify needs exactly 3 arguments' - - cosign verify-blob --cert "$1" --signature "$2" "$3" - - [[ $? -eq 0 ]] || fatal 'signature verification failed' -} - -assure_provenance() { - [[ $# -eq 1 ]] || fatal 'assure_provenance needs exactly 1 arguments' - - cosign_verify "${TMP_DIR}/checksums.txt.pem" \ - "${TMP_DIR}/checksums.txt.sig" \ - "${TMP_DIR}/checksums.txt" - - pushd "${TMP_DIR}" || exit - if command -v sha256sum; then - grep "$1" "checksums.txt" | sha256sum --check - else - grep "$1" "checksums.txt" | shasum -a 256 --check - fi - popd || exit - - [[ $? -eq 0 ]] || fatal 'integrity verification failed' -} - -extract_libraries(){ - [[ $# -eq 2 ]] || fatal 'extract_libraries needs exactly 2 arguments' - - tar -xf "${TMP_DIR}/$1" - - rm "${TMP_DIR}/$1" - mv "${2}" "${TAG}" - mv "${TAG}/" "./build/libgit2" -} - -fix_pkgconfigs(){ - NEW_DIR="$(/bin/pwd)/build/libgit2/${TAG}" - - # Update the prefix paths included in the .pc files. - if [[ $OSTYPE == 'darwin'* ]]; then - # https://github.com/fluxcd/golang-with-libgit2/blob/v0.1.4/.github/workflows/release.yaml#L158 - INSTALLED_DIR="/Users/runner/work/golang-with-libgit2/golang-with-libgit2/build/darwin-libgit2-only" - - # This will make it easier to update to the location in which they will be used. - # sed has a sight different behaviour in MacOS - # NB: Some macOS users may override their sed with gsed. If gsed is the PATH, use that instead. 
- if command -v gsed &> /dev/null; then - find "${NEW_DIR}" -type f -name "*.pc" | xargs -I {} gsed -i "s;${INSTALLED_DIR};${NEW_DIR};g" {} - else - find "${NEW_DIR}" -type f -name "*.pc" | xargs -I {} sed -i "" "s;${INSTALLED_DIR};${NEW_DIR};g" {} - fi - else - # https://github.com/fluxcd/golang-with-libgit2/blob/v0.1.4/.github/workflows/release.yaml#L52 - INSTALLED_DIR="/home/runner/work/golang-with-libgit2/golang-with-libgit2/build/build_libgit2_only" - - find "${NEW_DIR}" -type f -name "*.pc" | xargs -I {} sed -i "s;${INSTALLED_DIR};${NEW_DIR};g" {} - fi -} - -extract_from_image(){ - PLATFORM=$1 - DIR=$2 - - id=$(docker create --platform="${PLATFORM}" "${IMG_TAG}" sh) - docker cp "${id}":/usr/local - > output.tar.gz - docker rm -v "${id}" - - tar -xf output.tar.gz "local/${DIR}" - rm output.tar.gz - - NEW_DIR="$(/bin/pwd)/build/libgit2/${TAG}" - INSTALLED_DIR="/usr/local/${DIR}" - - mv "local/${DIR}" "${TAG}" - rm -rf "local" - mv "${TAG}/" "./build/libgit2" - - # Update the prefix paths included in the .pc files. - # This will make it easier to update to the location in which they will be used. - find "${NEW_DIR}" -type f -name "*.pc" | xargs -I {} sed -i "s;${INSTALLED_DIR};${NEW_DIR};g" {} -} - -install_libraries(){ - if [ -d "./build/libgit2/${TAG}" ]; then - echo "Skipping: libgit2 ${TAG} already installed" - exit 0 - fi - - mkdir -p "./build/libgit2" - - # Linux ARM support is still based on the container image libraries. 
- if [[ $OSTYPE == 'linux'* ]]; then - if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then - extract_from_image "linux/arm64" "aarch64-alpine-linux-musl" - fix_pkgconfigs "aarch64-alpine-linux-musl" - exit 0 - fi - fi - - FILE_NAME="linux-$(uname -m)-libgit2-only.tar.gz" - DIR="linux-libgit2-only" - if [[ $OSTYPE == 'darwin'* ]]; then - FILE_NAME="darwin-libgit2-only.tar.gz" - DIR="darwin-libgit2-only" - fi - - download_files "${FILE_NAME}" - assure_provenance "${FILE_NAME}" - extract_libraries "${FILE_NAME}" "${DIR}" - fix_pkgconfigs -} - -install_libraries diff --git a/hack/update-attributions.sh b/hack/update-attributions.sh deleted file mode 100755 index 41c2d45ae..000000000 --- a/hack/update-attributions.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -set -eoux pipefail - -SPLIT="***" - -cat < ATTRIBUTIONS.md -# Attributions - -This application uses Open Source components. You can find the source -code of their open source projects along with license information below. -We acknowledge and are grateful to these developers for their contributions -to open source. - -## libgit2 - -Libgit2 was obtained in source-code form from its github repository: -https://github.com/libgit2/libgit2/ - -No changes were made to its original source code. 
- -Copyright notice (https://raw.githubusercontent.com/libgit2/libgit2/main/COPYING): - -$(curl --max-time 5 -L https://raw.githubusercontent.com/libgit2/libgit2/main/COPYING) -EOF diff --git a/pkg/azure/blob.go b/internal/bucket/azure/blob.go similarity index 65% rename from pkg/azure/blob.go rename to internal/bucket/azure/blob.go index d7c2a0652..5bf814b7d 100644 --- a/pkg/azure/blob.go +++ b/internal/bucket/azure/blob.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "net/http" "net/url" "os" "path/filepath" @@ -29,14 +30,18 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - _ "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "github.com/fluxcd/pkg/auth" + azureauth "github.com/fluxcd/pkg/auth/azure" "github.com/fluxcd/pkg/masktoken" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -59,7 +64,57 @@ const ( // BlobClient is a minimal Azure Blob client for fetching objects. type BlobClient struct { - *azblob.ServiceClient + *azblob.Client +} + +// Option configures the BlobClient. +type Option func(*options) + +// WithSecret sets the Secret to use for the BlobClient. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithProxyURL sets the proxy URL to use for the BlobClient. 
+func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +type options struct { + secret *corev1.Secret + proxyURL *url.URL + withoutCredentials bool + withoutRetries bool + authOpts []auth.Option +} + +// withoutCredentials forces the BlobClient to not use any credentials. +// This is a test-only option useful for testing the client with HTTP +// endpoints (without TLS) alongside all the other options unrelated to +// credentials. +func withoutCredentials() Option { + return func(o *options) { + o.withoutCredentials = true + } +} + +// withoutRetries sets the BlobClient to not retry requests. +// This is a test-only option useful for testing connection errors. +func withoutRetries() Option { + return func(o *options) { + o.withoutRetries = true + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } } // NewClient creates a new Azure Blob storage client. @@ -67,71 +122,98 @@ type BlobClient struct { // Bucket and Secret. It detects credentials in the Secret in the following // order: // -// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and -// `clientSecret` fields are found. -// - azidentity.ClientCertificateCredential when `tenantId`, -// `clientCertificate` (and optionally `clientCertificatePassword`) fields -// are found. -// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId` -// field but no `tenantId` is found. -// - azblob.SharedKeyCredential when an `accountKey` field is found. -// The account name is extracted from the endpoint specified on the Bucket -// object. -// - azidentity.ChainedTokenCredential with azidentity.EnvironmentCredential -// and azidentity.ManagedIdentityCredential. +// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and +// `clientSecret` fields are found. 
+// - azidentity.ClientCertificateCredential when `tenantId`, +// `clientCertificate` (and optionally `clientCertificatePassword`) fields +// are found. +// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId` +// field but no `tenantId` is found. +// - azidentity.WorkloadIdentityCredential for when environment variables +// (AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID) +// are set by the Azure workload identity webhook. +// - azblob.SharedKeyCredential when an `accountKey` field is found. +// The account name is extracted from the endpoint specified on the Bucket +// object. +// - azidentity.ChainedTokenCredential with azidentity.EnvironmentCredential +// and azidentity.ManagedIdentityCredential. // // If no credentials are found, and the azidentity.ChainedTokenCredential can // not be established. A simple client without credentials is returned. -func NewClient(obj *sourcev1.Bucket, secret *corev1.Secret) (c *BlobClient, err error) { +func NewClient(ctx context.Context, obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) { c = &BlobClient{} + var o options + for _, opt := range opts { + opt(&o) + } + + clientOpts := &azblob.ClientOptions{} + + if o.proxyURL != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(o.proxyURL) + clientOpts.ClientOptions.Transport = &http.Client{Transport: transport} + } + + if o.withoutRetries { + clientOpts.ClientOptions.Retry.ShouldRetry = func(resp *http.Response, err error) bool { + return false + } + } + + if o.withoutCredentials { + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) + return + } + var token azcore.TokenCredential - if secret != nil && len(secret.Data) > 0 { + if o.secret != nil && len(o.secret.Data) > 0 { // Attempt AAD Token Credential options first. 
- if token, err = tokenCredentialFromSecret(secret); err != nil { - err = fmt.Errorf("failed to create token credential from '%s' Secret: %w", secret.Name, err) + if token, err = tokenCredentialFromSecret(o.secret); err != nil { + err = fmt.Errorf("failed to create token credential from '%s' Secret: %w", o.secret.Name, err) return } if token != nil { - c.ServiceClient, err = azblob.NewServiceClient(obj.Spec.Endpoint, token, nil) + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) return } // Fallback to Shared Key Credential. var cred *azblob.SharedKeyCredential - if cred, err = sharedCredentialFromSecret(obj.Spec.Endpoint, secret); err != nil { + if cred, err = sharedCredentialFromSecret(obj.Spec.Endpoint, o.secret); err != nil { return } if cred != nil { - c.ServiceClient, err = azblob.NewServiceClientWithSharedKey(obj.Spec.Endpoint, cred, &azblob.ClientOptions{}) + c.Client, err = azblob.NewClientWithSharedKeyCredential(obj.Spec.Endpoint, cred, clientOpts) return } var fullPath string - if fullPath, err = sasTokenFromSecret(obj.Spec.Endpoint, secret); err != nil { + if fullPath, err = sasTokenFromSecret(obj.Spec.Endpoint, o.secret); err != nil { return } - c.ServiceClient, err = azblob.NewServiceClientWithNoCredential(fullPath, &azblob.ClientOptions{}) + c.Client, err = azblob.NewClientWithNoCredential(fullPath, clientOpts) return } // Compose token chain based on environment. // This functions as a replacement for azidentity.NewDefaultAzureCredential // to not shell out. - token, err = chainCredentialWithSecret(secret) + token, err = chainCredentialWithSecret(ctx, o.secret, o.authOpts...) if err != nil { err = fmt.Errorf("failed to create environment credential chain: %w", err) return nil, err } if token != nil { - c.ServiceClient, err = azblob.NewServiceClient(obj.Spec.Endpoint, token, nil) + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) return } // Fallback to simple client. 
- c.ServiceClient, err = azblob.NewServiceClientWithNoCredential(obj.Spec.Endpoint, nil) + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) return } @@ -176,19 +258,20 @@ func ValidateSecret(secret *corev1.Secret) error { // BucketExists returns if an object storage bucket with the provided name // exists, or returns a (client) error. func (c *BlobClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { - container, err := c.ServiceClient.NewContainerClient(bucketName) - if err != nil { - return false, err - } - _, err = container.GetProperties(ctx, nil) - if err != nil { - var stgErr *azblob.StorageError - if errors.As(err, &stgErr) { - if stgErr.ErrorCode == azblob.StorageErrorCodeContainerNotFound { - return false, nil - } - err = stgErr + items := c.Client.NewListBlobsFlatPager(bucketName, &azblob.ListBlobsFlatOptions{ + MaxResults: to.Ptr(int32(1)), + }) + // We call next page only once since we just want to see if we get an error + if _, err := items.NextPage(ctx); err != nil { + if bloberror.HasCode(err, bloberror.ContainerNotFound) { + return false, nil + } + + // For a container-level SASToken, we get an AuthenticationFailed when the bucket doesn't exist + if bloberror.HasCode(err, bloberror.AuthenticationFailed) { + return false, fmt.Errorf("the specified bucket name may be incorrect, nonexistent, or the caller might lack sufficient permissions to access it: %w", err) } + return false, err } return true, nil @@ -198,15 +281,6 @@ func (c *BlobClient) BucketExists(ctx context.Context, bucketName string) (bool, // writes it to targetPath. // It returns the etag of the successfully fetched file, or any error. 
func (c *BlobClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { - container, err := c.ServiceClient.NewContainerClient(bucketName) - if err != nil { - return "", err - } - blob, err := container.NewBlobClient(objectName) - if err != nil { - return "", err - } - // Verify if destination already exists. dirStatus, err := os.Stat(localPath) if err == nil { @@ -233,7 +307,7 @@ func (c *BlobClient) FGetObject(ctx context.Context, bucketName, objectName, loc } // Download object. - res, err := blob.Download(ctx, nil) + res, err := c.DownloadStream(ctx, bucketName, objectName, nil) if err != nil { return "", err } @@ -251,7 +325,7 @@ func (c *BlobClient) FGetObject(ctx context.Context, bucketName, objectName, loc // Off we go. mw := io.MultiWriter(f, hash) - if _, err = io.Copy(mw, res.Body(nil)); err != nil { + if _, err = io.Copy(mw, res.Body); err != nil { if err = f.Close(); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to close file after copy error") } @@ -260,63 +334,52 @@ func (c *BlobClient) FGetObject(ctx context.Context, bucketName, objectName, loc if err = f.Close(); err != nil { return "", err } - return *res.ETag, nil + + return string(*res.ETag), nil } // VisitObjects iterates over the items in the provided object storage // bucket, calling visit for every item. // If the underlying client or the visit callback returns an error, // it returns early. 
-func (c *BlobClient) VisitObjects(ctx context.Context, bucketName string, visit func(path, etag string) error) error { - container, err := c.ServiceClient.NewContainerClient(bucketName) - if err != nil { - return err - } - - items := container.ListBlobsFlat(&azblob.ContainerListBlobsFlatOptions{}) - for items.NextPage(ctx) { - resp := items.PageResponse() +func (c *BlobClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(path, etag string) error) error { + items := c.NewListBlobsFlatPager(bucketName, nil) + for items.More() { + resp, err := items.NextPage(ctx) + if err != nil { + err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, err) + return err + } for _, blob := range resp.Segment.BlobItems { - if err := visit(*blob.Name, fmt.Sprintf("%x", *blob.Properties.Etag)); err != nil { + if err := visit(*blob.Name, fmt.Sprintf("%x", *blob.Properties.ETag)); err != nil { err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, err) return err } } } - if err := items.Err(); err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, err) - return err - } + return nil } // Close has no effect on BlobClient. -func (c *BlobClient) Close(_ context.Context) { - return -} +func (c *BlobClient) Close(_ context.Context) {} // ObjectIsNotFound checks if the error provided is an azblob.StorageError with // an azblob.StorageErrorCodeBlobNotFound error code. func (c *BlobClient) ObjectIsNotFound(err error) bool { - var stgErr *azblob.StorageError - if errors.As(err, &stgErr) { - if stgErr.ErrorCode == azblob.StorageErrorCodeBlobNotFound { - return true - } - } - return false + return bloberror.HasCode(err, bloberror.BlobNotFound) } // tokenCredentialsFromSecret attempts to create an azcore.TokenCredential // based on the data fields of the given Secret. 
It returns, in order:
-// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and
-// `clientSecret` fields are found.
-// - azidentity.ClientCertificateCredential when `tenantId`,
-// `clientCertificate` (and optionally `clientCertificatePassword`) fields
-// are found.
-// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId`
-// field but no `tenantId` is found.
-// - Nil, if no valid set of credential fields was found.
+// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and
+// `clientSecret` fields are found.
+// - azidentity.ClientCertificateCredential when `tenantId`,
+// `clientCertificate` (and optionally `clientCertificatePassword`) fields
+// are found.
+// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId`
+// field but no `tenantId` is found.
+// - Nil, if no valid set of credential fields was found.
 func tokenCredentialFromSecret(secret *corev1.Secret) (azcore.TokenCredential, error) {
 	if secret == nil {
 		return nil, nil
@@ -407,14 +470,17 @@ func sasTokenFromSecret(ep string, secret *corev1.Secret) (string, error) {
 // azidentity.ChainedTokenCredential if at least one of the following tokens was
 // successfully created:
 //
-// - azidentity.EnvironmentCredential with `authorityHost` from Secret, if
-// provided.
-// - azidentity.ManagedIdentityCredential with Client ID from AZURE_CLIENT_ID
-// environment variable, if found.
-// - azidentity.ManagedIdentityCredential with defaults.
+// - azidentity.EnvironmentCredential with `authorityHost` from Secret, if
+// provided.
+// - azidentity.WorkloadIdentityCredential with the Client ID from the
+// AZURE_CLIENT_ID environment variable, plus the AZURE_TENANT_ID and
+// AZURE_FEDERATED_TOKEN_FILE environment variables, if found.
+// - azidentity.ManagedIdentityCredential with Client ID from AZURE_CLIENT_ID
+// environment variable, if found.
+// - azidentity.ManagedIdentityCredential with defaults.
 //
 // If no valid token is created, it returns nil.
-func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, error) { +func chainCredentialWithSecret(ctx context.Context, secret *corev1.Secret, opts ...auth.Option) (azcore.TokenCredential, error) { var creds []azcore.TokenCredential credOpts := &azidentity.EnvironmentCredentialOptions{} @@ -427,14 +493,7 @@ func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, e if token, _ := azidentity.NewEnvironmentCredential(credOpts); token != nil { creds = append(creds, token) } - if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { - if token, _ := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ - ID: azidentity.ClientID(clientID), - }); token != nil { - creds = append(creds, token) - } - } - if token, _ := azidentity.NewManagedIdentityCredential(nil); token != nil { + if token := azureauth.NewTokenCredential(ctx, opts...); token != nil { creds = append(creds, token) } diff --git a/pkg/azure/blob_integration_test.go b/internal/bucket/azure/blob_integration_test.go similarity index 59% rename from pkg/azure/blob_integration_test.go rename to internal/bucket/azure/blob_integration_test.go index a00a90331..704b4c0c3 100644 --- a/pkg/azure/blob_integration_test.go +++ b/internal/bucket/azure/blob_integration_test.go @@ -21,22 +21,30 @@ package azure import ( "context" "crypto/md5" + "crypto/rand" "encoding/hex" "errors" "fmt" - "math/rand" + "log" "os" "path/filepath" "strings" "testing" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -46,6 +54,7 @@ var ( var ( testAccountName = os.Getenv("TEST_AZURE_ACCOUNT_NAME") testAccountKey = os.Getenv("TEST_AZURE_ACCOUNT_KEY") + cred *azblob.SharedKeyCredential ) var ( @@ -72,11 +81,12 @@ test: file2 } ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - func TestMain(m *testing.M) { + var err error + cred, err = blob.NewSharedKeyCredential(testAccountName, testAccountKey) + if err != nil { + log.Fatalf("unable to create shared key creds: %s", err.Error()) + } code := m.Run() os.Exit(code) } @@ -84,7 +94,7 @@ func TestMain(m *testing.M) { func TestBlobClient_BucketExists(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -110,7 +120,7 @@ func TestBlobClient_BucketExists(t *testing.T) { func TestBlobClient_BucketNotExists(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -130,7 +140,7 @@ func TestBlobClient_FGetObject(t *testing.T) { tempDir := t.TempDir() - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -148,7 
+158,8 @@ func TestBlobClient_FGetObject(t *testing.T) { // Create test blob. ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - g.Expect(createBlob(ctx, client, testContainer, testFile, testFileData)) + + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) localPath := filepath.Join(tempDir, testFile) @@ -169,12 +180,10 @@ func TestBlobClientSASKey_FGetObject(t *testing.T) { tempDir := t.TempDir() // create a client with the shared key - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) - g.Expect(client.CanGetAccountSASToken()).To(BeTrue()) - // Generate test container name. testContainer := generateString(testContainerGenerateName) @@ -189,19 +198,21 @@ func TestBlobClientSASKey_FGetObject(t *testing.T) { // Create test blob. ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - g.Expect(createBlob(ctx, client, testContainer, testFile, testFileData)) - + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)).To(Succeed()) localPath := filepath.Join(tempDir, testFile) // use the shared key client to create a SAS key for the account - sasKey, err := client.GetSASToken(azblob.AccountSASResourceTypes{Object: true, Container: true}, - azblob.AccountSASPermissions{List: true, Read: true}, - azblob.AccountSASServices{Blob: true}, - time.Now(), - time.Now().Add(48*time.Hour)) + cred, err := service.NewSharedKeyCredential(testAccountName, testAccountKey) + g.Expect(err).ToNot(HaveOccurred()) + url := fmt.Sprintf("https://%s.blob.core.windows.net", testAccountName) + serviceClient, err := service.NewClientWithSharedKeyCredential(url, cred, nil) + g.Expect(err).ToNot(HaveOccurred()) + sasKey, err := serviceClient.GetSASURL(sas.AccountResourceTypes{Object: true, Container: true}, + 
sas.AccountPermissions{List: true, Read: true}, + time.Now().Add(48*time.Hour), + &service.GetSASURLOptions{}) g.Expect(err).ToNot(HaveOccurred()) g.Expect(sasKey).ToNot(BeEmpty()) - // the sdk returns the full SAS url e.g test.blob.core.windows.net/? sasKey = strings.TrimPrefix(sasKey, testBucket.Spec.Endpoint+"/") testSASKeySecret := corev1.Secret{ @@ -210,24 +221,94 @@ func TestBlobClientSASKey_FGetObject(t *testing.T) { }, } - sasKeyClient, err := NewClient(testBucket.DeepCopy(), testSASKeySecret.DeepCopy()) + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) - // Test if blob exists using sasKey. + // Test if bucket and blob exists using sasKey. ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - _, err = sasKeyClient.FGetObject(ctx, testContainer, testFile, localPath) + ok, err := sasKeyClient.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + _, err = client.FGetObject(ctx, testContainer, testFile, localPath) g.Expect(err).ToNot(HaveOccurred()) + _, err = sasKeyClient.FGetObject(ctx, testContainer, testFile, localPath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(localPath).To(BeARegularFile()) f, _ := os.ReadFile(localPath) g.Expect(f).To(Equal([]byte(testFileData))) } +func TestBlobClientContainerSASKey_BucketExists(t *testing.T) { + g := NewWithT(t) + + // create a client with the shared key + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. 
+ ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blob. + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) + + // use the container client to create a container-level SAS key for the account + cred, err := container.NewSharedKeyCredential(testAccountName, testAccountKey) + g.Expect(err).ToNot(HaveOccurred()) + url := fmt.Sprintf("https://%s.blob.core.windows.net/%s", testAccountName, testContainer) + containerClient, err := container.NewClientWithSharedKeyCredential(url, cred, nil) + g.Expect(err).ToNot(HaveOccurred()) + // sasKey + sasKey, err := containerClient.GetSASURL(sas.ContainerPermissions{Read: true, List: true}, + time.Now().Add(48*time.Hour), &container.GetSASURLOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(sasKey).ToNot(BeEmpty()) + + // the sdk returns the full SAS url e.g test.blob.core.windows.net//? + sasKey = strings.TrimPrefix(sasKey, testBucket.Spec.Endpoint+"/"+testContainer) + testSASKeySecret := corev1.Secret{ + Data: map[string][]byte{ + sasKeyField: []byte(sasKey), + }, + } + + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + + // Test if bucket and blob exists using sasKey. + ok, err := sasKeyClient.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + // BucketExists returns an error if the bucket doesn't exist with container level SAS + // since the error code is AuthenticationFailed. 
+	ok, err = sasKeyClient.BucketExists(ctx, "non-existent")
+	g.Expect(err).To(HaveOccurred())
+	g.Expect(err.Error()).To(ContainSubstring("bucket name may be incorrect"))
+	g.Expect(ok).To(BeFalse())
+}
+
 func TestBlobClient_FGetObject_NotFoundErr(t *testing.T) {
 	g := NewWithT(t)
 
-	client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy())
+	client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy()))
 	g.Expect(err).ToNot(HaveOccurred())
 	g.Expect(client).ToNot(BeNil())
 
@@ -254,7 +335,7 @@ func TestBlobClient_FGetObject_NotFoundErr(t *testing.T) {
 func TestBlobClient_VisitObjects(t *testing.T) {
 	g := NewWithT(t)
 
-	client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy())
+	client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy()))
 	g.Expect(err).ToNot(HaveOccurred())
 	g.Expect(client).ToNot(BeNil())
 
@@ -272,15 +353,15 @@ func TestBlobClient_VisitObjects(t *testing.T) {
 	// Create test blobs.
 	ctx, timeout = context.WithTimeout(context.Background(), testTimeout)
 	defer timeout()
-	g.Expect(createBlob(ctx, client, testContainer, testFile, testFileData))
-	g.Expect(createBlob(ctx, client, testContainer, testFile2, testFile2Data))
+	g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData))
+	g.Expect(createBlob(ctx, cred, testContainer, testFile2, testFile2Data))
 
 	visits := make(map[string]string)
 
 	// Visit objects.
ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - got := client.VisitObjects(ctx, testContainer, func(path, etag string) error { + got := client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { visits[path] = etag return nil }) @@ -294,7 +375,7 @@ func TestBlobClient_VisitObjects(t *testing.T) { func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -312,13 +393,13 @@ func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { // Create test blob. ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - g.Expect(createBlob(ctx, client, testContainer, testFile, testFileData)) + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) // Visit object. 
ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() mockErr := fmt.Errorf("mock") - err = client.VisitObjects(ctx, testContainer, func(path, etag string) error { + err = client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { return mockErr }) g.Expect(err).To(HaveOccurred()) @@ -327,9 +408,9 @@ func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { func createContainer(ctx context.Context, client *BlobClient, name string) error { if _, err := client.CreateContainer(ctx, name, nil); err != nil { - var stgErr *azblob.StorageError + var stgErr *azcore.ResponseError if errors.As(err, &stgErr) { - if stgErr.ErrorCode == azblob.StorageErrorCodeContainerAlreadyExists { + if stgErr.ErrorCode == string(bloberror.ContainerAlreadyExists) { return nil } err = stgErr @@ -339,19 +420,22 @@ func createContainer(ctx context.Context, client *BlobClient, name string) error return nil } -func createBlob(ctx context.Context, client *BlobClient, containerName, name, data string) error { - container := client.NewContainerClient(containerName) - blob := container.NewAppendBlobClient(name) - +func createBlob(ctx context.Context, cred *blob.SharedKeyCredential, containerName, name, data string) error { + blobURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", testAccountName, containerName, name) + blobC, err := appendblob.NewClientWithSharedKeyCredential(blobURL, cred, nil) + if err != nil { + return err + } ctx, timeout := context.WithTimeout(context.Background(), testTimeout) defer timeout() - if _, err := blob.Create(ctx, nil); err != nil { + if _, err := blobC.Create(ctx, nil); err != nil { return err } hash := md5.Sum([]byte(data)) - if _, err := blob.AppendBlock(ctx, streaming.NopCloser(strings.NewReader(data)), &azblob.AppendBlockOptions{ - TransactionalContentMD5: hash[:16], + + if _, err := blobC.AppendBlock(ctx, streaming.NopCloser(strings.NewReader(data)), &appendblob.AppendBlockOptions{ + 
TransactionalValidation: blob.TransferValidationTypeMD5(hash[:16]), }); err != nil { return err } @@ -360,13 +444,8 @@ func createBlob(ctx context.Context, client *BlobClient, containerName, name, da func deleteContainer(ctx context.Context, client *BlobClient, name string) error { if _, err := client.DeleteContainer(ctx, name, nil); err != nil { - var stgErr *azblob.StorageError - if errors.As(err, &stgErr) { - if code := stgErr.ErrorCode; code == azblob.StorageErrorCodeContainerNotFound || - code == azblob.StorageErrorCodeContainerBeingDeleted { - return nil - } - err = stgErr + if bloberror.HasCode(err, bloberror.ContainerNotFound, bloberror.ContainerBeingDeleted) { + return nil } return err } diff --git a/pkg/azure/blob_test.go b/internal/bucket/azure/blob_test.go similarity index 79% rename from pkg/azure/blob_test.go rename to internal/bucket/azure/blob_test.go index 36f5b5b56..83f17e900 100644 --- a/pkg/azure/blob_test.go +++ b/internal/bucket/azure/blob_test.go @@ -18,6 +18,7 @@ package azure import ( "bytes" + "context" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -25,16 +26,106 @@ import ( "errors" "fmt" "math/big" + "net/http" "net/url" "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" ) +func TestNewClientAndBucketExistsWithProxy(t *testing.T) { + g := NewWithT(t) + + proxyAddr, _ := testproxy.New(t) + + // start mock bucket server + bucketListener, bucketAddr, _ := testlistener.New(t) + bucketEndpoint := fmt.Sprintf("http://%s", bucketAddr) + bucketHandler := http.NewServeMux() + bucketHandler.HandleFunc("GET /podinfo", func(w http.ResponseWriter, r *http.Request) { + // verify query params comp=list&maxresults=1&restype=container + q := r.URL.Query() + g.Expect(q.Get("comp")).To(Equal("list")) + g.Expect(q.Get("maxresults")).To(Equal("1")) + g.Expect(q.Get("restype")).To(Equal("container")) + // the azure library does not expose the struct for this response + // and copying its definition yields a strange "unsupported type" + // error when marshaling to xml, so we just hardcode a valid response + // here + resp := fmt.Sprintf(` + +1 + + +`, bucketEndpoint) + _, err := w.Write([]byte(resp)) + g.Expect(err).ToNot(HaveOccurred()) + }) + bucketServer := &http.Server{ + Addr: bucketAddr, + Handler: bucketHandler, + } + go bucketServer.Serve(bucketListener) + defer bucketServer.Shutdown(context.Background()) + + tests := []struct { + name string + endpoint string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", 1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + bucket := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + Endpoint: 
tt.endpoint, + }, + } + + client, err := NewClient(t.Context(), + bucket, + WithProxyURL(tt.proxyURL), + withoutCredentials(), + withoutRetries()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + ok, err := client.BucketExists(context.Background(), "podinfo") + if tt.err != "" { + g.Expect(err).To(MatchError(ContainSubstring(tt.err))) + g.Expect(ok).To(BeFalse()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + } + }) + } +} + func TestValidateSecret(t *testing.T) { tests := []struct { name string @@ -133,12 +224,12 @@ func TestBlobClient_ObjectIsNotFound(t *testing.T) { }{ { name: "StorageError with BlobNotFound code", - err: &azblob.StorageError{ErrorCode: azblob.StorageErrorCodeBlobNotFound}, + err: &azcore.ResponseError{ErrorCode: string(bloberror.BlobNotFound)}, want: true, }, { name: "StorageError with different code", - err: &azblob.StorageError{ErrorCode: azblob.StorageErrorCodeInternalError}, + err: &azcore.ResponseError{ErrorCode: string(bloberror.InternalError)}, }, { name: "other error", @@ -363,16 +454,15 @@ func Test_sasTokenFromSecret(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - _, err := url.ParseQuery("") got, err := sasTokenFromSecret(tt.endpoint, tt.secret) g.Expect(err != nil).To(Equal(tt.wantErr)) if tt.want != "" { - ttVaules, err := url.Parse(tt.want) + ttValues, err := url.Parse(tt.want) g.Expect(err).To(BeNil()) gotValues, err := url.Parse(got) g.Expect(err).To(BeNil()) - g.Expect(gotValues.Query()).To(Equal(ttVaules.Query())) + g.Expect(gotValues.Query()).To(Equal(ttValues.Query())) return } g.Expect(got).To(Equal("")) @@ -383,7 +473,7 @@ func Test_sasTokenFromSecret(t *testing.T) { func Test_chainCredentialWithSecret(t *testing.T) { g := NewWithT(t) - got, err := chainCredentialWithSecret(nil) + got, err := chainCredentialWithSecret(t.Context(), nil) g.Expect(err).ToNot(HaveOccurred()) 
g.Expect(got).To(BeAssignableToTypeOf(&azidentity.ChainedTokenCredential{})) } diff --git a/pkg/gcp/gcp.go b/internal/bucket/gcp/gcp.go similarity index 56% rename from pkg/gcp/gcp.go rename to internal/bucket/gcp/gcp.go index 419885cbb..70afe9fcd 100644 --- a/pkg/gcp/gcp.go +++ b/internal/bucket/gcp/gcp.go @@ -21,15 +21,24 @@ import ( "errors" "fmt" "io" + "net/http" + "net/url" "os" "path/filepath" gcpstorage "cloud.google.com/go/storage" "github.com/go-logr/logr" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/auth" + gcpauth "github.com/fluxcd/pkg/auth/gcp" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -48,24 +57,110 @@ type GCSClient struct { *gcpstorage.Client } -// NewClient creates a new GCP storage client. The Client will automatically look for the Google Application +// Option is a functional option for configuring the GCS client. +type Option func(*options) + +// WithSecret sets the secret to use for authenticating with GCP. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithProxyURL sets the proxy URL to use for the GCS client. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + +type options struct { + secret *corev1.Secret + proxyURL *url.URL + authOpts []auth.Option + + // newCustomHTTPClient should create a new HTTP client for interacting with the GCS API. 
+ // This is a test-only option required for mocking the real logic, which requires either + // a valid Google Service Account Key or Controller-Level Workload Identity. Both are not available in tests. + // The real logic is implemented in the newHTTPClient function, which is used when + // constructing the default options object. + newCustomHTTPClient func(context.Context, *options) (*http.Client, error) +} + +func newOptions() *options { + return &options{ + newCustomHTTPClient: newHTTPClient, + } +} + +// NewClient creates a new GCP storage client. The Client will automatically look for the Google Application // Credential environment variable or look for the Google Application Credential file. -func NewClient(ctx context.Context, secret *corev1.Secret) (*GCSClient, error) { - c := &GCSClient{} - if secret != nil { - client, err := gcpstorage.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*GCSClient, error) { + o := newOptions() + for _, opt := range opts { + opt(o) + } + + var clientOpts []option.ClientOption + + switch { + case o.secret != nil && o.proxyURL == nil: + clientOpts = append(clientOpts, option.WithCredentialsJSON(o.secret.Data["serviceaccount"])) + case o.secret == nil && o.proxyURL == nil: + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + clientOpts = append(clientOpts, option.WithTokenSource(tokenSource)) + default: // o.proxyURL != nil: + httpClient, err := o.newCustomHTTPClient(ctx, o) if err != nil { return nil, err } - c.Client = client - } else { - client, err := gcpstorage.NewClient(ctx) + clientOpts = append(clientOpts, option.WithHTTPClient(httpClient)) + } + + client, err := gcpstorage.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + return &GCSClient{client}, nil +} + +// newHTTPClient creates a new HTTP client for interacting with Google Cloud APIs. 
+func newHTTPClient(ctx context.Context, o *options) (*http.Client, error) { + baseTransport := http.DefaultTransport.(*http.Transport).Clone() + if o.proxyURL != nil { + baseTransport.Proxy = http.ProxyURL(o.proxyURL) + } + + var opts []option.ClientOption + + if o.secret != nil { + // Here we can't use option.WithCredentialsJSON() because htransport.NewTransport() + // won't know what scopes to use and yield a 400 Bad Request error when retrieving + // the OAuth token. Instead we use google.CredentialsFromJSON(), which allows us to + // specify the GCS read-only scope. + creds, err := google.CredentialsFromJSON(ctx, o.secret.Data["serviceaccount"], gcpstorage.ScopeReadOnly) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create Google credentials from secret: %w", err) } - c.Client = client + opts = append(opts, option.WithCredentials(creds)) + } else { // Workload Identity. + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + opts = append(opts, option.WithTokenSource(tokenSource)) + } + + transport, err := htransport.NewTransport(ctx, baseTransport, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create Google HTTP transport: %w", err) } - return c, nil + return &http.Client{Transport: transport}, nil } // ValidateSecret validates the credential secret. The provided Secret may @@ -84,14 +179,14 @@ func ValidateSecret(secret *corev1.Secret) error { // exists, or returns a (client) error. func (c *GCSClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) - if err == gcpstorage.ErrBucketNotExist { + if err == nil { + return true, nil + } + if errors.Is(err, gcpstorage.ErrBucketNotExist) { // Not returning error to be compatible with minio's API. 
return false, nil } - if err != nil { - return false, err - } - return true, nil + return false, err } // FGetObject gets the object from the provided object storage bucket, and @@ -165,8 +260,10 @@ func (c *GCSClient) FGetObject(ctx context.Context, bucketName, objectName, loca // bucket, calling visit for every item. // If the underlying client or the visit callback returns an error, // it returns early. -func (c *GCSClient) VisitObjects(ctx context.Context, bucketName string, visit func(path, etag string) error) error { - items := c.Client.Bucket(bucketName).Objects(ctx, nil) +func (c *GCSClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(path, etag string) error) error { + items := c.Client.Bucket(bucketName).Objects(ctx, &gcpstorage.Query{ + Prefix: prefix, + }) for { object, err := items.Next() if err == IteratorDone { diff --git a/pkg/gcp/gcp_test.go b/internal/bucket/gcp/gcp_test.go similarity index 64% rename from pkg/gcp/gcp_test.go rename to internal/bucket/gcp/gcp_test.go index 4ab98b7a5..0c12a72ea 100644 --- a/pkg/gcp/gcp_test.go +++ b/internal/bucket/gcp/gcp_test.go @@ -26,19 +26,23 @@ import ( "net" "net/http" "net/http/httptest" + "net/url" "os" "path/filepath" "testing" "time" + "cloud.google.com/go/compute/metadata" gcpstorage "cloud.google.com/go/storage" + . 
"github.com/onsi/gomega" "google.golang.org/api/googleapi" + "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" - "gotest.tools/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "google.golang.org/api/option" + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testproxy "github.com/fluxcd/source-controller/tests/proxy" ) const ( @@ -46,10 +50,13 @@ const ( objectName string = "test.yaml" objectGeneration int64 = 3 objectEtag string = "bFbHCDvedeecefdgmfmhfuRxBdcedGe96S82XJOAXxjJpk=" + envGCSHost string = "STORAGE_EMULATOR_HOST" + envADC string = "GOOGLE_APPLICATION_CREDENTIALS" ) var ( hc *http.Client + host string client *gcpstorage.Client close func() err error @@ -75,8 +82,24 @@ var ( } ) +// createTestBucket creates a test bucket for testing purposes +func createTestBucket() *sourcev1.Bucket { + return &sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Endpoint: "storage.googleapis.com", + Provider: sourcev1.BucketProviderGoogle, + Interval: v1.Duration{Duration: time.Minute * 5}, + }, + } +} + func TestMain(m *testing.M) { - hc, close = newTestServer(func(w http.ResponseWriter, r *http.Request) { + hc, host, close = newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(io.Discard, r.Body) switch r.RequestURI { case fmt.Sprintf("/storage/v1/b/%s?alt=json&prettyPrint=false&projection=full", bucketName): @@ -103,6 +126,8 @@ func TestMain(m *testing.M) { } case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeTrailingDelimiter=false&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): + case 
fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeTrailingDelimiter=false&matchGlob=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): + case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeFoldersAsPrefixes=false&includeTrailingDelimiter=false&matchGlob=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): w.WriteHeader(200) response := &raw.Objects{} response.Items = append(response.Items, getObject()) @@ -138,10 +163,102 @@ func TestMain(m *testing.M) { } func TestNewClientWithSecretErr(t *testing.T) { - gcpClient, err := NewClient(context.Background(), secret.DeepCopy()) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, WithSecret(secret.DeepCopy())) t.Log(err) - assert.Error(t, err, "dialing: invalid character 'e' looking for beginning of value") - assert.Assert(t, gcpClient == nil) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("dialing: invalid character 'e' looking for beginning of value")) + g.Expect(gcpClient).To(BeNil()) +} + +func TestNewClientWithProxyErr(t *testing.T) { + _, envADCIsSet := os.LookupEnv(envADC) + g := NewWithT(t) + g.Expect(envADCIsSet).To(BeFalse()) + g.Expect(metadata.OnGCE()).To(BeFalse()) + + t.Run("with secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{}), + WithSecret(secret.DeepCopy())) + g.Expect(err).To(HaveOccurred()) + g.Expect(gcpClient).To(BeNil()) + g.Expect(err.Error()).To(Equal("failed to create Google credentials from secret: invalid character 'e' looking for beginning of value")) + }) + + t.Run("without secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{})) + 
g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) + bucketAttrs, err := gcpClient.Client.Bucket("some-bucket").Attrs(context.Background()) + g.Expect(err).To(HaveOccurred()) + g.Expect(bucketAttrs).To(BeNil()) + g.Expect(err.Error()).To(ContainSubstring("failed to create provider access token")) + }) +} + +func TestProxy(t *testing.T) { + proxyAddr, proxyPort := testproxy.New(t) + + err := os.Setenv(envGCSHost, fmt.Sprintf("https://%s", host)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + defer func() { + err := os.Unsetenv(envGCSHost) + g.Expect(err).NotTo(HaveOccurred()) + }() + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct address", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect address", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + opts := []Option{WithProxyURL(tt.proxyURL)} + opts = append(opts, func(o *options) { + o.newCustomHTTPClient = func(ctx context.Context, o *options) (*http.Client, error) { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyURL(o.proxyURL), + } + return &http.Client{Transport: transport}, nil + } + }) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, opts...) 
+ g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) + gcpClient.Client.SetRetry(gcpstorage.WithMaxAttempts(1)) + exists, err := gcpClient.BucketExists(context.Background(), bucketName) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) + } + }) + } } func TestBucketExists(t *testing.T) { @@ -149,8 +266,9 @@ func TestBucketExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) } func TestBucketNotExists(t *testing.T) { @@ -159,8 +277,9 @@ func TestBucketNotExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucket) - assert.NilError(t, err) - assert.Assert(t, !exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) } func TestVisitObjects(t *testing.T) { @@ -169,25 +288,29 @@ func TestVisitObjects(t *testing.T) { } keys := []string{} etags := []string{} - err := gcpClient.VisitObjects(context.Background(), bucketName, func(key, etag string) error { + err := gcpClient.VisitObjects(context.Background(), bucketName, "", func(key, etag string) error { keys = append(keys, key) etags = append(etags, etag) return nil }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) } func TestVisitObjectsErr(t *testing.T) { + g := NewWithT(t) gcpClient := &GCSClient{ Client: client, } badBucketName := "bad-bucket" - err := gcpClient.VisitObjects(context.Background(), badBucketName, func(key, 
etag string) error { + err := gcpClient.VisitObjects(context.Background(), badBucketName, "", func(key, etag string) error { return nil }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring( + fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName))) } func TestVisitObjectsCallbackErr(t *testing.T) { @@ -195,13 +318,16 @@ func TestVisitObjectsCallbackErr(t *testing.T) { Client: client, } mockErr := fmt.Errorf("mock") - err := gcpClient.VisitObjects(context.Background(), bucketName, func(key, etag string) error { + err := gcpClient.VisitObjects(context.Background(), bucketName, "", func(key, etag string) error { return mockErr }) - assert.Error(t, err, mockErr.Error()) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) } func TestFGetObject(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, @@ -209,33 +335,34 @@ func TestFGetObject(t *testing.T) { localPath := filepath.Join(tempDir, objectName) etag, err := gcpClient.FGetObject(context.Background(), bucketName, objectName, localPath) if err != io.EOF { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } - assert.Equal(t, etag, objectEtag) + g.Expect(etag).To(Equal(objectEtag)) } func TestFGetObjectNotExists(t *testing.T) { + g := NewWithT(t) object := "notexists.txt" tempDir := t.TempDir() gcsClient := &GCSClient{ Client: client, } localPath := filepath.Join(tempDir, object) - _, err = gcsClient.FGetObject(context.Background(), bucketName, object, localPath) - if err != io.EOF { - assert.Error(t, err, "storage: object doesn't exist") - assert.Check(t, gcsClient.ObjectIsNotFound(err)) - } + _, err := gcsClient.FGetObject(context.Background(), bucketName, object, localPath) + g.Expect(err).To(HaveOccurred()) + 
g.Expect(err.Error()).To(ContainSubstring("storage: object doesn't exist")) } func TestFGetObjectDirectoryIsFileName(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, } _, err = gcpClient.FGetObject(context.Background(), bucketName, objectName, tempDir) if err != io.EOF { - assert.Error(t, err, "filename is a directory") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("filename is a directory")) } } @@ -261,25 +388,28 @@ func TestValidateSecret(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() err := ValidateSecret(tt.secret) + g := NewWithT(t) if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name))) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } } -func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, string, func()) { ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + host := ts.Listener.Addr().String() tlsConf := &tls.Config{InsecureSkipVerify: true} tr := &http.Transport{ TLSClientConfig: tlsConf, DialTLS: func(netw, addr string) (net.Conn, error) { - return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + return tls.Dial("tcp", host, tlsConf) }, } - return &http.Client{Transport: tr}, func() { + return &http.Client{Transport: tr}, host, func() { tr.CloseIdleConnections() ts.Close() } @@ -308,6 +438,7 @@ func getObject() *raw.Object { func getBucket() *raw.Bucket { labels := map[string]string{"a": "b"} matchClasses := []string{"STANDARD"} + age := int64(10) aTime := time.Date(2021, 1, 2, 0, 0, 0, 0, time.UTC) rb := &raw.Bucket{ Name: bucketName, @@ -327,7 +458,7 @@ func 
getBucket() *raw.Bucket { StorageClass: "NEARLINE", }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: 10, + Age: &age, IsLive: googleapi.Bool(true), CreatedBefore: "2021-01-02", MatchesStorageClass: matchClasses, diff --git a/internal/bucket/minio/minio.go b/internal/bucket/minio/minio.go new file mode 100644 index 000000000..026200a83 --- /dev/null +++ b/internal/bucket/minio/minio.go @@ -0,0 +1,373 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package minio + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + corev1 "k8s.io/api/core/v1" + + "github.com/fluxcd/pkg/auth" + awsauth "github.com/fluxcd/pkg/auth/aws" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +// MinioClient is a minimal Minio client for fetching files from S3 compatible +// storage APIs. +type MinioClient struct { + *minio.Client +} + +// options holds the configuration for the Minio client. +type options struct { + secret *corev1.Secret + stsSecret *corev1.Secret + tlsConfig *tls.Config + stsTLSConfig *tls.Config + proxyURL *url.URL + authOpts []auth.Option +} + +// Option is a function that configures the Minio client. 
+type Option func(*options) + +// WithSecret sets the secret for the Minio client. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithTLSConfig sets the TLS configuration for the Minio client. +func WithTLSConfig(tlsConfig *tls.Config) Option { + return func(o *options) { + o.tlsConfig = tlsConfig + } +} + +// WithProxyURL sets the proxy URL for the Minio client. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +// WithSTSSecret sets the STS secret for the Minio client. +func WithSTSSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.stsSecret = secret + } +} + +// WithSTSTLSConfig sets the STS TLS configuration for the Minio client. +func WithSTSTLSConfig(tlsConfig *tls.Config) Option { + return func(o *options) { + o.stsTLSConfig = tlsConfig + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + +// NewClient creates a new Minio storage client. +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*MinioClient, error) { + var o options + for _, opt := range opts { + opt(&o) + } + + minioOpts := minio.Options{ + Region: bucket.Spec.Region, + Secure: !bucket.Spec.Insecure, + // About BucketLookup, it should be noted that not all S3 providers support + // path-type access (e.g., Ali OSS). Hence, we revert to using the default + // auto access, which we believe can cover most use cases. 
+ } + + switch bucketProvider := bucket.Spec.Provider; { + case o.secret != nil: + minioOpts.Creds = newCredsFromSecret(o.secret) + case bucketProvider == sourcev1.BucketProviderAmazon: + creds, err := newAWSCreds(ctx, &o) + if err != nil { + return nil, err + } + minioOpts.Creds = creds + case bucketProvider == sourcev1.BucketProviderGeneric: + minioOpts.Creds = newGenericCreds(bucket, &o) + } + + var transportOpts []func(*http.Transport) + + if minioOpts.Secure && o.tlsConfig != nil { + transportOpts = append(transportOpts, func(t *http.Transport) { + t.TLSClientConfig = o.tlsConfig.Clone() + }) + } + + if o.proxyURL != nil { + transportOpts = append(transportOpts, func(t *http.Transport) { + t.Proxy = http.ProxyURL(o.proxyURL) + }) + } + + if len(transportOpts) > 0 { + transport, err := minio.DefaultTransport(minioOpts.Secure) + if err != nil { + return nil, fmt.Errorf("failed to create default minio transport: %w", err) + } + for _, opt := range transportOpts { + opt(transport) + } + minioOpts.Transport = transport + } + + client, err := minio.New(bucket.Spec.Endpoint, &minioOpts) + if err != nil { + return nil, err + } + return &MinioClient{Client: client}, nil +} + +// newCredsFromSecret creates a new Minio credentials object from the provided +// secret. +func newCredsFromSecret(secret *corev1.Secret) *credentials.Credentials { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey != "" && secretKey != "" { + return credentials.NewStaticV4(accessKey, secretKey, "") + } + return nil +} + +// newAWSCreds creates a new Minio credentials object for `aws` bucket provider. +// +// This function is only called when Secret authentication is not available. 
+// +// Uses AWS SDK's config.LoadDefaultConfig() which supports: +// - Workload Identity (IRSA/EKS Pod Identity) +// - EC2 instance profiles +// - Environment variables +// - Shared credentials files +// - All other AWS SDK authentication methods +func newAWSCreds(ctx context.Context, o *options) (*credentials.Credentials, error) { + var opts auth.Options + opts.Apply(o.authOpts...) + + awsCredsProvider := awsauth.NewCredentialsProvider(ctx, o.authOpts...) + awsCreds, err := awsCredsProvider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("AWS authentication failed: %w", err) + } + + return credentials.NewStaticV4( + awsCreds.AccessKeyID, + awsCreds.SecretAccessKey, + awsCreds.SessionToken, + ), nil +} + +// newGenericCreds creates a new Minio credentials object for the `generic` bucket provider. +func newGenericCreds(bucket *sourcev1.Bucket, o *options) *credentials.Credentials { + + sts := bucket.Spec.STS + if sts == nil { + return nil + } + + switch sts.Provider { + case sourcev1.STSProviderLDAP: + client := &http.Client{Transport: http.DefaultTransport} + if o.proxyURL != nil || o.stsTLSConfig != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + if o.proxyURL != nil { + transport.Proxy = http.ProxyURL(o.proxyURL) + } + if o.stsTLSConfig != nil { + transport.TLSClientConfig = o.stsTLSConfig.Clone() + } + client = &http.Client{Transport: transport} + } + var username, password string + if o.stsSecret != nil { + username = string(o.stsSecret.Data["username"]) + password = string(o.stsSecret.Data["password"]) + } + return credentials.New(&credentials.LDAPIdentity{ + Client: client, + STSEndpoint: sts.Endpoint, + LDAPUsername: username, + LDAPPassword: password, + }) + } + + return nil +} + +// ValidateSecret validates the credential secret. The provided Secret may +// be nil. 
+func ValidateSecret(secret *corev1.Secret) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + if _, ok := secret.Data["accesskey"]; !ok { + return err + } + if _, ok := secret.Data["secretkey"]; !ok { + return err + } + return nil +} + +// ValidateSTSProvider validates the STS provider. +func ValidateSTSProvider(bucketProvider string, sts *sourcev1.BucketSTSSpec) error { + errProviderIncompatbility := fmt.Errorf("STS provider '%s' is not supported for '%s' bucket provider", + sts.Provider, bucketProvider) + errSecretNotRequired := fmt.Errorf("spec.sts.secretRef is not required for the '%s' STS provider", + sts.Provider) + errCertSecretNotRequired := fmt.Errorf("spec.sts.certSecretRef is not required for the '%s' STS provider", + sts.Provider) + + switch bucketProvider { + case sourcev1.BucketProviderAmazon: + switch sts.Provider { + case sourcev1.STSProviderAmazon: + if sts.SecretRef != nil { + return errSecretNotRequired + } + if sts.CertSecretRef != nil { + return errCertSecretNotRequired + } + return nil + default: + return errProviderIncompatbility + } + case sourcev1.BucketProviderGeneric: + switch sts.Provider { + case sourcev1.STSProviderLDAP: + return nil + default: + return errProviderIncompatbility + } + } + + return fmt.Errorf("STS configuration is not supported for '%s' bucket provider", bucketProvider) +} + +// ValidateSTSSecret validates the STS secret. The provided Secret may be nil. +func ValidateSTSSecret(stsProvider string, secret *corev1.Secret) error { + switch stsProvider { + case sourcev1.STSProviderLDAP: + return validateSTSSecretForProvider(stsProvider, secret, "username", "password") + default: + return nil + } +} + +// validateSTSSecretForProvider validates the STS secret for each provider. +// The provided Secret may be nil. 
+func validateSTSSecretForProvider(stsProvider string, secret *corev1.Secret, keys ...string) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data for '%s' STS provider: required fields %s", + secret.Name, stsProvider, strings.Join(keys, ", ")) + if len(secret.Data) == 0 { + return err + } + for _, key := range keys { + value, ok := secret.Data[key] + if !ok || len(value) == 0 { + return err + } + } + return nil +} + +// FGetObject gets the object from the provided object storage bucket, and +// writes it to targetPath. +// It returns the etag of the successfully fetched file, or any error. +func (c *MinioClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { + stat, err := c.Client.StatObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + return "", err + } + opts := minio.GetObjectOptions{} + if err = opts.SetMatchETag(stat.ETag); err != nil { + return "", err + } + if err = c.Client.FGetObject(ctx, bucketName, objectName, localPath, opts); err != nil { + return "", err + } + return stat.ETag, nil +} + +// VisitObjects iterates over the items in the provided object storage +// bucket, calling visit for every item. +// If the underlying client or the visit callback returns an error, +// it returns early. +func (c *MinioClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error { + for object := range c.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{ + Recursive: true, + Prefix: prefix, + UseV1: s3utils.IsGoogleEndpoint(*c.Client.EndpointURL()), + }) { + if object.Err != nil { + err := fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, object.Err) + return err + } + + if err := visit(object.Key, object.ETag); err != nil { + return err + } + } + return nil +} + +// ObjectIsNotFound checks if the error provided is a minio.ErrResponse +// with "NoSuchKey" code. 
+func (c *MinioClient) ObjectIsNotFound(err error) bool { + if resp := new(minio.ErrorResponse); errors.As(err, resp) { + return resp.Code == "NoSuchKey" + } + return false +} + +// Close closes the Minio Client and logs any useful errors. +func (c *MinioClient) Close(_ context.Context) { + // Minio client does not provide a close method +} diff --git a/internal/bucket/minio/minio_test.go b/internal/bucket/minio/minio_test.go new file mode 100644 index 000000000..d6ba7baa4 --- /dev/null +++ b/internal/bucket/minio/minio_test.go @@ -0,0 +1,825 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package minio + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/xml" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/uuid" + miniov7 "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + . 
"github.com/onsi/gomega" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +const ( + objectName string = "test.yaml" + objectEtag string = "b07bba5a280b58791bc78fb9fc414b09" +) + +var ( + // testMinioVersion is the version (image tag) of the Minio server image + // used to test against. + testMinioVersion = "RELEASE.2024-05-07T06-41-25Z" + // testMinioRootUser is the root user of the Minio server. + testMinioRootUser = "fluxcd" + // testMinioRootPassword is the root password of the Minio server. + testMinioRootPassword = "passw0rd!" + // testVaultAddress is the address of the Minio server, it is set + // by TestMain after booting it. + testMinioAddress string + // testMinioClient is the Minio client used to test against, it is set + // by TestMain after booting the Minio server. + testMinioClient *MinioClient + // testTLSConfig is the TLS configuration used to connect to the Minio server. + testTLSConfig *tls.Config + // testServerCert is the path to the server certificate used to start the Minio + // and STS servers. + testServerCert string + // testServerKey is the path to the server key used to start the Minio and STS servers. + testServerKey string + // ctx is the common context used in tests. 
+ ctx context.Context +) + +var ( + bucketName = "test-bucket-minio" + uuid.New().String() + prefix = "" + secret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + emptySecret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{}, + Type: "Opaque", + } + bucket = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "generic", + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + bucketAwsProvider = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "aws", + }, + } +) + +func TestMain(m *testing.M) { + // Initialize common test context + ctx = context.Background() + + // Uses a sensible default on Windows (TCP/HTTP) and Linux/MacOS (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("could not connect to docker: %s", err) + } + + // Load a private key and certificate from a self-signed CA for the Minio server and + // a client TLS configuration to connect to the Minio server. 
+ testServerCert, testServerKey, testTLSConfig, err = loadServerCertAndClientTLSConfig() + if err != nil { + log.Fatalf("could not load server cert and client TLS config: %s", err) + } + + // Pull the image, create a container based on it, and run it + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "minio/minio", + Tag: testMinioVersion, + ExposedPorts: []string{ + "9000/tcp", + "9001/tcp", + }, + Env: []string{ + "MINIO_ROOT_USER=" + testMinioRootUser, + "MINIO_ROOT_PASSWORD=" + testMinioRootPassword, + }, + Cmd: []string{"server", "/data", "--console-address", ":9001"}, + Mounts: []string{ + fmt.Sprintf("%s:/root/.minio/certs/public.crt", testServerCert), + fmt.Sprintf("%s:/root/.minio/certs/private.key", testServerKey), + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + }) + if err != nil { + log.Fatalf("could not start resource: %s", err) + } + + purgeResource := func() { + if err := pool.Purge(resource); err != nil { + log.Printf("could not purge resource: %s", err) + } + } + + // Set the address of the Minio server used for testing. + testMinioAddress = fmt.Sprintf("127.0.0.1:%v", resource.GetPort("9000/tcp")) + + // Construct a Minio client using the address of the Minio server. + testMinioClient, err = NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + if err != nil { + log.Fatalf("cannot create Minio client: %s", err) + } + + // Wait until Minio is ready to serve requests... 
+ if err := pool.Retry(func() error { + hCancel, err := testMinioClient.HealthCheck(1 * time.Second) + if err != nil { + log.Fatalf("cannot start Minio health check: %s", err) + } + defer hCancel() + + if !testMinioClient.IsOnline() { + return fmt.Errorf("client is offline: Minio is not ready") + } + return nil + }); err != nil { + purgeResource() + log.Fatalf("could not connect to docker: %s", err) + } + + createBucket(ctx) + addObjectToBucket(ctx) + run := m.Run() + removeObjectFromBucket(ctx) + deleteBucket(ctx) + purgeResource() + os.Exit(run) +} + +func TestNewClient(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientEmptySecret(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(emptySecret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientAWSProvider(t *testing.T) { + t.Run("with secret", func(t *testing.T) { + validSecret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket, WithSecret(&validSecret)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + }) + + t.Run("without secret", func(t *testing.T) { + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("AWS authentication failed")) + 
g.Expect(minioClient).To(BeNil()) + }) +} + +func TestBucketExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, bucketName) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) +} + +func TestBucketNotExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, "notexistsbucket") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) +} + +func TestFGetObject(t *testing.T) { + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err := testMinioClient.FGetObject(ctx, bucketName, objectName, path) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { + var credsRetrieved bool + + // start a mock LDAP STS server + ldapSTSListener, ldapSTSAddr, _ := testlistener.New(t) + ldapSTSEndpoint := fmt.Sprintf("https://%s", ldapSTSAddr) + ldapSTSHandler := http.NewServeMux() + var ldapUsername, ldapPassword string + ldapSTSHandler.HandleFunc("POST /", + func(w http.ResponseWriter, r *http.Request) { + g := NewWithT(t) + err := r.ParseForm() + g.Expect(err).NotTo(HaveOccurred()) + username := r.Form.Get("LDAPUsername") + password := r.Form.Get("LDAPPassword") + g.Expect(username).To(Equal(ldapUsername)) + g.Expect(password).To(Equal(ldapPassword)) + var result credentials.LDAPIdentityResult + result.Credentials.AccessKey = testMinioRootUser + result.Credentials.SecretKey = testMinioRootPassword + err = xml.NewEncoder(w).Encode(credentials.AssumeRoleWithLDAPResponse{Result: result}) + g.Expect(err).NotTo(HaveOccurred()) + credsRetrieved = true + }) + ldapSTSServer := &http.Server{ + Addr: ldapSTSAddr, + Handler: ldapSTSHandler, + } + go ldapSTSServer.ServeTLS(ldapSTSListener, testServerCert, testServerKey) + defer ldapSTSServer.Shutdown(ctx) + + // start proxy + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + provider string + stsSpec 
*sourcev1.BucketSTSSpec + opts []Option + ldapUsername string + ldapPassword string + err string + }{ + { + name: "with correct ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{WithSTSTLSConfig(testTLSConfig)}, + }, + { + name: "with incorrect ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: fmt.Sprintf("http://localhost:%d", 1), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and secret", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithSTSTLSConfig(testTLSConfig), + WithSTSSecret(&corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("password"), + }, + }), + }, + ldapUsername: "user", + ldapPassword: "password", + }, + { + name: "with correct ldap endpoint and proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: proxyAddr}), + WithSTSTLSConfig(testTLSConfig), + }, + }, + { + name: "with correct ldap endpoint and incorrect proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and without client tls config", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + err: "tls: failed to verify certificate", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + credsRetrieved = false + ldapUsername = tt.ldapUsername + ldapPassword = tt.ldapPassword + + bucket := bucketStub(bucket, testMinioAddress) 
+ bucket.Spec.Provider = tt.provider + bucket.Spec.STS = tt.stsSpec + + opts := tt.opts + opts = append(opts, WithTLSConfig(testTLSConfig)) + + minioClient, err := NewClient(ctx, bucket, opts...) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + + path := filepath.Join(t.TempDir(), sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(credsRetrieved).To(BeTrue()) + } + }) + } +} + +func TestNewClientAndFGetObjectWithProxy(t *testing.T) { + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + errSubstring string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + errSubstring: "connection refused", + }, + } + + // run test + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig), + WithProxyURL(tt.proxyURL)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.errSubstring != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.errSubstring)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestFGetObjectNotExists(t *testing.T) { + tempDir := t.TempDir() + badKey := "invalid.txt" + path := filepath.Join(tempDir, badKey) + _, err := testMinioClient.FGetObject(ctx, bucketName, badKey, path) + g := 
NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("The specified key does not exist.")) + g.Expect(testMinioClient.ObjectIsNotFound(err)).To(BeTrue()) +} + +func TestVisitObjects(t *testing.T) { + keys := []string{} + etags := []string{} + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + keys = append(keys, key) + etags = append(etags, etag) + return nil + }) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) +} + +func TestVisitObjectsErr(t *testing.T) { + badBucketName := "bad-bucket" + err := testMinioClient.VisitObjects(ctx, badBucketName, prefix, func(string, string) error { + return nil + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName))) +} + +func TestVisitObjectsCallbackErr(t *testing.T) { + mockErr := fmt.Errorf("mock") + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + return mockErr + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) +} + +func TestValidateSecret(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + secret *corev1.Secret + error bool + }{ + { + name: "valid secret", + secret: secret.DeepCopy(), + }, + { + name: "nil secret", + secret: nil, + }, + { + name: "invalid secret", + secret: emptySecret.DeepCopy(), + error: true, + }, + } + for _, testCase := range testCases { + tt := testCase + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + err := ValidateSecret(tt.secret) + if tt.error { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name))) 
+ } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSProvider(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bucketProvider string + stsProvider string + withSecret bool + withCertSecret bool + err string + }{ + { + name: "aws", + bucketProvider: "aws", + stsProvider: "aws", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsProvider: "aws", + withSecret: true, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsProvider: "aws", + withCertSecret: true, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap", + bucketProvider: "generic", + stsProvider: "ldap", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsProvider: "ldap", + withSecret: true, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsProvider: "ldap", + withCertSecret: true, + }, + { + name: "ldap sts provider unsupported for aws bucket provider", + bucketProvider: "aws", + stsProvider: "ldap", + err: "STS provider 'ldap' is not supported for 'aws' bucket provider", + }, + { + name: "aws sts provider unsupported for generic bucket provider", + bucketProvider: "generic", + stsProvider: "aws", + err: "STS provider 'aws' is not supported for 'generic' bucket provider", + }, + { + name: "unsupported bucket provider", + bucketProvider: "gcp", + stsProvider: "ldap", + err: "STS configuration is not supported for 'gcp' bucket provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + sts := &sourcev1.BucketSTSSpec{ + Provider: tt.stsProvider, + } + if tt.withSecret { + sts.SecretRef = &meta.LocalObjectReference{} + } + if tt.withCertSecret { + sts.CertSecretRef = &meta.LocalObjectReference{} + } + g := NewWithT(t) + err := ValidateSTSProvider(tt.bucketProvider, sts) + if 
tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSSecret(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + provider string + secret *corev1.Secret + err string + }{ + { + name: "ldap provider does not require a secret", + provider: "ldap", + }, + { + name: "valid ldap secret", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + }, + { + name: "empty ldap secret", + provider: "ldap", + secret: &corev1.Secret{ObjectMeta: v1.ObjectMeta{Name: "ldap-secret"}}, + err: "invalid 'ldap-secret' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte(""), + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte(""), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + 
err := ValidateSTSSecret(tt.provider, tt.secret) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func bucketStub(bucket sourcev1.Bucket, endpoint string) *sourcev1.Bucket { + b := bucket.DeepCopy() + b.Spec.Endpoint = endpoint + b.Spec.Insecure = false + return b +} + +func createBucket(ctx context.Context) { + if err := testMinioClient.Client.MakeBucket(ctx, bucketName, miniov7.MakeBucketOptions{}); err != nil { + exists, errBucketExists := testMinioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + deleteBucket(ctx) + } else { + log.Fatalf("could not create bucket: %s", err) + } + } +} + +func deleteBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveBucket(ctx, bucketName); err != nil { + log.Println(err) + } +} + +func addObjectToBucket(ctx context.Context) { + fileReader := strings.NewReader(getObjectFile()) + fileSize := fileReader.Size() + _, err := testMinioClient.Client.PutObject(ctx, bucketName, objectName, fileReader, fileSize, miniov7.PutObjectOptions{ + ContentType: "text/x-yaml", + }) + if err != nil { + log.Println(err) + } +} + +func removeObjectFromBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveObject(ctx, bucketName, objectName, miniov7.RemoveObjectOptions{ + GovernanceBypass: true, + }); err != nil { + log.Println(err) + } +} + +func getObjectFile() string { + return ` + apiVersion: source.toolkit.fluxcd.io/v1 + kind: Bucket + metadata: + name: podinfo + namespace: default + spec: + interval: 5m + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s + ` +} + +func loadServerCertAndClientTLSConfig() (serverCert string, serverKey string, clientConf *tls.Config, err error) { + const certsDir = "../../controller/testdata/certs" + clientConf = &tls.Config{} + + serverCert, err = filepath.Abs(filepath.Join(certsDir, 
"server.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server cert path: %w", err) + } + serverKey, err = filepath.Abs(filepath.Join(certsDir, "server-key.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server key path: %w", err) + } + + b, err := os.ReadFile(filepath.Join(certsDir, "ca.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load CA: %w", err) + } + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(b) { + return "", "", nil, errors.New("failed to append CA to pool") + } + clientConf.RootCAs = caPool + + clientCert := filepath.Join(certsDir, "client.pem") + clientKey := filepath.Join(certsDir, "client-key.pem") + client, err := tls.LoadX509KeyPair(clientCert, clientKey) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load client cert and key: %w", err) + } + clientConf.Certificates = []tls.Certificate{client} + + return +} diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 1c11f09d1..6f8ee8608 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -132,7 +132,7 @@ func (c *cache) Delete(key string) { } // Clear all items from the cache. -// This reallocate the inderlying array holding the items, +// This reallocates the underlying array holding the items, // so that the memory used by the items is reclaimed. 
func (c *cache) Clear() { c.mu.Lock() @@ -163,11 +163,10 @@ func (c *cache) HasExpired(key string) bool { func (c *cache) SetExpiration(key string, expiration time.Duration) { c.mu.Lock() item, ok := c.Items[key] - if !ok { - c.mu.Unlock() - return + if ok { + item.Expiration = time.Now().Add(expiration).UnixNano() + c.Items[key] = item } - item.Expiration = time.Now().Add(expiration).UnixNano() c.mu.Unlock() } diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go index 70d87c8ab..e6d3d6ac6 100644 --- a/internal/cache/cache_test.go +++ b/internal/cache/cache_test.go @@ -85,3 +85,55 @@ func TestCache(t *testing.T) { g.Expect(found).To(BeFalse()) g.Expect(item).To(BeNil()) } + +func TestCacheExpiration(t *testing.T) { + g := NewWithT(t) + cache := New(10, 0) + + key := "testKey" + value := "testValue" + expiration := 1 * time.Second + + err := cache.Add(key, value, expiration) + g.Expect(err).ToNot(HaveOccurred()) + + newExpiration := 2 * time.Second + cache.SetExpiration(key, newExpiration) + actualExpiration := cache.GetExpiration(key) + + g.Expect(actualExpiration).Should(BeNumerically("~", newExpiration, 100*time.Millisecond)) + + g.Expect(cache.HasExpired(key)).To(BeFalse()) + + time.Sleep(newExpiration + 100*time.Millisecond) + + g.Expect(cache.HasExpired(key)).To(BeTrue()) + + g.Expect(cache.GetExpiration(key)).To(BeZero()) + + nonExistentKey := "nonExistent" + cache.SetExpiration(nonExistentKey, 1*time.Second) + g.Expect(cache.GetExpiration(nonExistentKey)).To(BeZero()) + + g.Expect(cache.HasExpired(nonExistentKey)).To(BeTrue()) +} + +func TestCacheDeleteClear(t *testing.T) { + g := NewWithT(t) + cache := New(3, 0) + + err := cache.Add("key1", "value1", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key2", "value2", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key3", "value3", 0) + g.Expect(err).ToNot(HaveOccurred()) + + cache.Delete("key2") + _, found := cache.Get("key2") + g.Expect(found).To(BeFalse()) + 
g.Expect(cache.ItemCount()).To(Equal(2)) + + cache.Clear() + g.Expect(cache.ItemCount()).To(Equal(0)) +} diff --git a/internal/cache/metrics.go b/internal/cache/metrics.go index dc5514c05..09b43ec5b 100644 --- a/internal/cache/metrics.go +++ b/internal/cache/metrics.go @@ -40,6 +40,7 @@ type CacheRecorder struct { // - "miss" // - "hit" // - "update" +// // The name is the name of the reconciled resource. // The namespace is the namespace of the reconciled resource. func NewCacheRecorder() *CacheRecorder { @@ -66,6 +67,11 @@ func (r *CacheRecorder) IncCacheEvents(event, name, namespace string) { r.cacheEventsCounter.WithLabelValues(event, name, namespace).Inc() } +// DeleteCacheEvent deletes the cache event metric. +func (r *CacheRecorder) DeleteCacheEvent(event, name, namespace string) { + r.cacheEventsCounter.DeleteLabelValues(event, name, namespace) +} + // MustMakeMetrics creates a new CacheRecorder, and registers the metrics collectors in the controller-runtime metrics registry. func MustMakeMetrics() *CacheRecorder { r := NewCacheRecorder() diff --git a/controllers/artifact.go b/internal/controller/artifact.go similarity index 57% rename from controllers/artifact.go rename to internal/controller/artifact.go index 8d034f075..bebc8d5ae 100644 --- a/controllers/artifact.go +++ b/internal/controller/artifact.go @@ -14,11 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller -import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" +import ( + "github.com/fluxcd/pkg/apis/meta" +) -type artifactSet []*sourcev1.Artifact +type artifactSet []*meta.Artifact // Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. 
func (s artifactSet) Diff(set artifactSet) bool { @@ -37,25 +39,3 @@ outer: } return false } - -// hasArtifactUpdated returns true if any of the revisions in the current artifacts -// does not match any of the artifacts in the updated artifacts -// NOTE: artifactSet is a replacement for this. Remove this once it's not used -// anywhere. -func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { - if len(current) != len(updated) { - return true - } - -OUTER: - for _, c := range current { - for _, u := range updated { - if u.HasRevision(c.Revision) { - continue OUTER - } - } - return true - } - - return false -} diff --git a/controllers/artifact_matchers_test.go b/internal/controller/artifact_matchers_test.go similarity index 83% rename from controllers/artifact_matchers_test.go rename to internal/controller/artifact_matchers_test.go index 5007cc6dd..af716e086 100644 --- a/controllers/artifact_matchers_test.go +++ b/internal/controller/artifact_matchers_test.go @@ -14,29 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( "fmt" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" + + "github.com/fluxcd/pkg/apis/meta" ) // MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
-func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { +func MatchArtifact(expected *meta.Artifact) types.GomegaMatcher { return &matchArtifact{ expected: expected, } } type matchArtifact struct { - expected *sourcev1.Artifact + expected *meta.Artifact } func (m matchArtifact) Match(actual interface{}) (success bool, err error) { - actualArtifact, ok := actual.(*sourcev1.Artifact) + actualArtifact, ok := actual.(*meta.Artifact) if !ok { return false, fmt.Errorf("actual should be a pointer to an Artifact") } @@ -51,9 +52,6 @@ func (m matchArtifact) Match(actual interface{}) (success bool, err error) { if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { return ok, err } - if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { - return ok, err - } if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok { return ok, err } diff --git a/controllers/artifact_test.go b/internal/controller/artifact_test.go similarity index 99% rename from controllers/artifact_test.go rename to internal/controller/artifact_test.go index 935c93bf7..d40548c3c 100644 --- a/controllers/artifact_test.go +++ b/internal/controller/artifact_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( "testing" diff --git a/controllers/bucket_controller.go b/internal/controller/bucket_controller.go similarity index 52% rename from controllers/bucket_controller.go rename to internal/controller/bucket_controller.go index b5545049c..7fe881be6 100644 --- a/controllers/bucket_controller.go +++ b/internal/controller/bucket_controller.go @@ -14,49 +14,57 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package controller import ( "context" - "crypto/sha256" + "crypto/tls" "errors" "fmt" + "net/url" "os" "path/filepath" - "sort" "strings" - "sync" "time" - "github.com/fluxcd/source-controller/pkg/azure" + "github.com/opencontainers/go-digest" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" + "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/bucket/azure" + 
"github.com/fluxcd/source-controller/internal/bucket/gcp" + "github.com/fluxcd/source-controller/internal/bucket/minio" serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/index" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/pkg/gcp" - "github.com/fluxcd/source-controller/pkg/minio" - "github.com/fluxcd/source-controller/pkg/sourceignore" ) // maxConcurrentBucketFetches is the upper bound on the goroutines used to @@ -72,7 +80,7 @@ import ( const maxConcurrentBucketFetches = 100 // bucketReadyCondition contains the information required to summarize a -// v1beta2.Bucket Ready Condition. +// v1.Bucket Ready Condition. var bucketReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -111,20 +119,24 @@ var bucketFailConditions = []string{ // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create -// BucketReconciler reconciles a v1beta2.Bucket object. +// BucketReconciler reconciles a v1.Bucket object. 
type BucketReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache + + patchOptions []patch.Option } type BucketReconcilerOptions struct { - MaxConcurrentReconciles int - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // BucketProvider is an interface for fetching objects from a storage provider @@ -141,7 +153,7 @@ type BucketProvider interface { // bucket, calling visit for every item. // If the underlying client or the visit callback returns an error, // it returns early. - VisitObjects(ctx context.Context, bucketName string, visit func(key, etag string) error) error + VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error // ObjectIsNotFound returns true if the given error indicates an object // could not be found. ObjectIsNotFound(error) bool @@ -149,112 +161,39 @@ type BucketProvider interface { Close(context.Context) } -// bucketReconcileFunc is the function type for all the v1beta2.Bucket -// (sub)reconcile functions. The type implementations are grouped and -// executed serially to perform the complete reconcile of the object. -type bucketReconcileFunc func(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) - -// etagIndex is an index of storage object keys and their Etag values. -type etagIndex struct { - sync.RWMutex - index map[string]string -} - -// newEtagIndex returns a new etagIndex with an empty initialized index. 
-func newEtagIndex() *etagIndex { - return &etagIndex{ - index: make(map[string]string), - } -} - -func (i *etagIndex) Add(key, etag string) { - i.Lock() - defer i.Unlock() - i.index[key] = etag -} - -func (i *etagIndex) Delete(key string) { - i.Lock() - defer i.Unlock() - delete(i.index, key) +// bucketCredentials contains all credentials and configuration needed for bucket providers. +type bucketCredentials struct { + secret *corev1.Secret + proxyURL *url.URL + tlsConfig *tls.Config + stsSecret *corev1.Secret + stsTLSConfig *tls.Config } -func (i *etagIndex) Get(key string) string { - i.RLock() - defer i.RUnlock() - return i.index[key] -} - -func (i *etagIndex) Has(key string) bool { - i.RLock() - defer i.RUnlock() - _, ok := i.index[key] - return ok -} - -func (i *etagIndex) Index() map[string]string { - i.RLock() - defer i.RUnlock() - index := make(map[string]string) - for k, v := range i.index { - index[k] = v - } - return index -} - -func (i *etagIndex) Len() int { - i.RLock() - defer i.RUnlock() - return len(i.index) -} - -// Revision calculates the SHA256 checksum of the index. -// The keys are stable sorted, and the SHA256 sum is then calculated for the -// string representation of the key/value pairs, each pair written on a newline -// with a space between them. The sum result is returned as a string. -func (i *etagIndex) Revision() (string, error) { - i.RLock() - defer i.RUnlock() - keyIndex := make([]string, 0, len(i.index)) - for k := range i.index { - keyIndex = append(keyIndex, k) - } - - sort.Strings(keyIndex) - sum := sha256.New() - for _, k := range keyIndex { - if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i.index[k]))); err != nil { - return "", err - } - } - return fmt.Sprintf("%x", sum.Sum(nil)), nil -} +// bucketReconcileFunc is the function type for all the v1.Bucket +// (sub)reconcile functions. The type implementations are grouped and +// executed serially to perform the complete reconcile of the object. 
+type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error { + r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName) + return ctrl.NewControllerManagedBy(mgr). For(&sourcev1.Bucket{}). WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, + RateLimiter: opts.RateLimiter, }). Complete(r) } func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() - log := ctrl.LoggerFrom(ctx). - // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. - ctx = ctrl.LoggerInto(ctx, log) + log := ctrl.LoggerFrom(ctx) // Fetch the Bucket obj := &sourcev1.Bucket{} @@ -262,20 +201,8 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, client.IgnoreNotFound(err) } - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - // Initialize the patch helper with the current version of the object. 
- patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err - } + serialPatcher := patch.NewSerialPatcher(obj, r.Client) // recResult stores the abstracted reconcile result. var recResult sreconcile.Result @@ -283,36 +210,47 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Always attempt to patch the object and status after each reconciliation // NOTE: The final runtime result and error are set in this block. defer func() { - summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) summarizeOpts := []summarize.Option{ summarize.WithConditions(bucketReadyCondition), summarize.WithReconcileResult(recResult), summarize.WithReconcileError(retErr), summarize.WithIgnoreNotFound(), summarize.WithProcessors( - summarize.RecordContextualError, + summarize.ErrorActionHandler, summarize.RecordReconcileReq, ), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() - // Add finalizer first if not exist to avoid the race condition between init and delete + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition between init + // and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. 
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue return } - // Examine if the object is under deletion - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - recResult, retErr = r.reconcileDelete(ctx, obj) + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil return } @@ -322,29 +260,45 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res r.reconcileSource, r.reconcileArtifact, } - recResult, retErr = r.reconcile(ctx, obj, reconcilers) + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) return } // reconcile iterates through the bucketReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. -func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() - // Mark as reconciling if generation differs. - if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var recAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + recAtVal = v + } + + // Persist reconciling if generation differs or reconciliation is requested. 
+ switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case recAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } } // Create temp working dir tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create temporary working directory: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create temporary working directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer func() { @@ -358,11 +312,11 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, var ( res sreconcile.Result resErr error - index = newEtagIndex() + index = index.NewDigester() ) for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, index, tmpDir) + recResult, err := rec(ctx, sp, obj, index, tmpDir) // Exit immediately on ResultRequeue. if recResult == sreconcile.ResultRequeue { return sreconcile.ResultRequeue, nil @@ -384,24 +338,19 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, } // notify emits notification related to the reconciliation. 
-func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *etagIndex, res sreconcile.Result, resErr error) { +func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { annotations := map[string]string{ - sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision, - sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum, - } - - var oldChecksum string - if oldObj.GetArtifact() != nil { - oldChecksum = oldObj.GetArtifact().Checksum + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, } message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName) // Notify on new artifact and failure recovery. - if oldChecksum != newObj.GetArtifact().Checksum { + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, "NewArtifact", message) ctrl.LoggerFrom(ctx).Info(message) @@ -427,22 +376,49 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1. // condition is added. // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. 
-func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *etagIndex, _ string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) - // Determine if the advertised artifact is still in storage - if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { - obj.Status.Artifact = nil - obj.Status.URL = "" - // Remove the condition as the artifact doesn't exist. - conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } } // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) 
+ if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } return sreconcile.ResultSuccess, nil } @@ -457,93 +433,62 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // reconcileSource fetches the upstream bucket contents with the client for the // given object's Provider, and returns the result. // When a SecretRef is defined, it attempts to fetch the Secret before calling -// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on +// the provider. If this fails, it records v1.FetchFailedCondition=True on // the object and returns early. -func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) { - secret, err := r.getBucketSecret(ctx, obj) - if err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - // Return error as the world as observed may change - return sreconcile.ResultEmpty, e - } - - // Construct provider client - var provider BucketProvider - switch obj.Spec.Provider { - case sourcev1.GoogleBucketProvider: - if err = gcp.ValidateSecret(secret); err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = gcp.NewClient(ctx, secret); err != nil { - e := &serror.Event{Err: err, Reason: "ClientError"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - case sourcev1.AzureBucketProvider: - if err = azure.ValidateSecret(secret); err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = azure.NewClient(obj, secret); err != nil { - e := &serror.Event{Err: err, Reason: "ClientError"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - default: - if err = minio.ValidateSecret(secret); err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = minio.NewClient(obj, secret); err != nil { - e := &serror.Event{Err: err, Reason: "ClientError"} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) +func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { + usesObjectLevelWorkloadIdentity := obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.BucketProviderGeneric && obj.Spec.ServiceAccountName != "" + if usesObjectLevelWorkloadIdentity { + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + e := serror.NewStalling(err, meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } - // Fetch etag index - if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) + creds, err := r.setupCredentials(ctx, obj) + if err != nil { + e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - // Calculate revision - revision, err := index.Revision() + provider, err := r.createBucketProvider(ctx, obj, creds) if err != nil { - return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("failed to calculate revision: %w", err), - Reason: meta.FailedReason, + var stallingErr *serror.Stalling + var genericErr *serror.Generic + if errors.As(err, &stallingErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, stallingErr.Reason, "%s", stallingErr) + return sreconcile.ResultEmpty, stallingErr + } else if errors.As(err, &genericErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, genericErr.Reason, "%s", genericErr) + return sreconcile.ResultEmpty, genericErr + } else { + e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e } } + changed, err := r.syncBucketArtifacts(ctx, provider, obj, index, dir) + if err != nil { + e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } - // Mark observations about the revision on the object - defer func() { - // As fetchIndexFiles can make last-minute modifications to the etag - // index, we need to re-calculate the revision at the end - revision, err := index.Revision() - if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision after fetching etag index") - return + // Update artifact status if changes were detected + if changed { + revision := index.Digest(intdigest.Canonical) + message := fmt.Sprintf("new upstream revision '%s'", revision) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) } - - if !obj.GetArtifact().HasRevision(revision) { - message := fmt.Sprintf("new 
upstream revision '%s'", revision) - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) - conditions.MarkReconciling(obj, "NewRevision", message) - } - }() - - if !obj.GetArtifact().HasRevision(revision) { - if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { - e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason} - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to patch") + return sreconcile.ResultEmpty, err } } @@ -555,91 +500,92 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. 
-func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { // Calculate revision - revision, err := index.Revision() - if err != nil { - return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("failed to calculate revision of new artifact: %w", err), - Reason: meta.FailedReason, - } - } + revision := index.Digest(intdigest.Canonical) // Create artifact - artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision.String(), fmt.Sprintf("%s.tar.gz", revision.Encoded())) // Set the ArtifactInStorageCondition if there's no drift. defer func() { - if obj.GetArtifact().HasRevision(artifact.Revision) { - conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, - "stored artifact for revision '%s'", artifact.Revision) + if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" { + curRev := digest.Digest(curArtifact.Revision) + if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, + "stored artifact: revision '%s'", artifact.Revision) + } } }() // The artifact is up-to-date - if obj.GetArtifact().HasRevision(artifact.Revision) { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) - return sreconcile.ResultSuccess, nil + if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" { + curRev := 
digest.Digest(curArtifact.Revision) + if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil + } } // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to stat source path: %w", err), - Reason: sourcev1.StatOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to stat source path: %w", err), + sourcev1.StatOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } else if !f.IsDir() { - e := &serror.Event{ - Err: fmt.Errorf("source path '%s' is not a directory", dir), - Reason: sourcev1.InvalidPathReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("source path '%s' is not a directory", dir), + sourcev1.InvalidPathReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Ensure artifact directory exists and acquire lock if err := r.Storage.MkdirAll(artifact); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create artifact directory: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) if err != nil { - 
return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), - Reason: meta.FailedReason, - } + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + meta.FailedReason, + ) } defer unlock() // Archive directory to storage if err := r.Storage.Archive(&artifact, dir, nil); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("unable to archive artifact to storage: %s", err), - Reason: sourcev1.ArchiveOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("unable to archive artifact to storage: %s", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Record it on the object obj.Status.Artifact = artifact.DeepCopy() + obj.Status.ObservedIgnore = obj.Spec.Ignore // Update symlink on a "best effort" basis url, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, "failed to update status URL symlink: %s", err) } if url != "" { @@ -662,6 +608,10 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. 
+ r.TokenCache.DeleteEventsForObject(sourcev1.BucketKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -674,12 +624,12 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %s", err), + "GarbageCollectionFailed", + ) } else if deleted != "" { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil @@ -688,37 +638,20 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc if obj.GetArtifact() != nil { delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) if err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection of artifacts failed: %w", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) } if len(delFiles) > 0 { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) return nil } } return nil } -// getBucketSecret attempts to fetch the Secret reference if specified on the 
-// obj. It returns any client error. -func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) { - if obj.Spec.SecretRef == nil { - return nil, nil - } - secretName := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.Spec.SecretRef.Name, - } - secret := &corev1.Secret{} - if err := r.Get(ctx, secretName, secret); err != nil { - return nil, fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err) - } - return secret, nil -} - // eventLogf records events, and logs at the same time. // // This log is different from the debug log in the EventRecorder, in the sense @@ -749,7 +682,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, // bucket using the given provider, while filtering them using .sourceignore // rules. After fetching an object, the etag value in the index is updated to // the current value to ensure accuracy. -func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error { +func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -767,7 +700,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1. path := filepath.Join(tempDir, sourceignore.IgnoreFile) if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if !provider.ObjectIsNotFound(err) { - return err + return fmt.Errorf("failed to get Etag for '%s' object: %w", sourceignore.IgnoreFile, serror.SanitizeError(err)) } } ps, err := sourceignore.ReadIgnoreFile(path, nil) @@ -781,7 +714,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1. 
matcher := sourceignore.NewMatcher(ps) // Build up index - err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, func(key, etag string) error { + err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, obj.Spec.Prefix, func(key, etag string) error { if strings.HasSuffix(key, "/") || key == sourceignore.IgnoreFile { return nil } @@ -803,7 +736,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1. // using the given provider, and stores them into tempDir. It downloads in // parallel, but limited to the maxConcurrentBucketFetches. // Given an index is provided, the bucket is assumed to exist. -func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error { +func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -831,7 +764,7 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1 index.Delete(k) return nil } - return fmt.Errorf("failed to get '%s' object: %w", k, err) + return fmt.Errorf("failed to get '%s' object: %w", k, serror.SanitizeError(err)) } if t != etag { index.Add(k, etag) @@ -847,3 +780,206 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1 return nil } + +// setupCredentials retrieves and validates secrets for authentication, TLS configuration, and proxy settings. +// It returns all credentials needed for bucket providers. 
+func (r *BucketReconciler) setupCredentials(ctx context.Context, obj *sourcev1.Bucket) (*bucketCredentials, error) { + var secret *corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s': %w", secretName, err) + } + } + + var stsSecret *corev1.Secret + if obj.Spec.STS != nil && obj.Spec.STS.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.SecretRef.Name, + } + stsSecret = &corev1.Secret{} + if err := r.Get(ctx, secretName, stsSecret); err != nil { + return nil, fmt.Errorf("failed to get STS secret '%s': %w", secretName, err) + } + } + + var ( + err error + proxyURL *url.URL + tlsConfig *tls.Config + stsTLSConfig *tls.Config + ) + + if obj.Spec.ProxySecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.ProxySecretRef.Name, + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL: %w", err) + } + } + + if obj.Spec.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.CertSecretRef.Name, + } + tlsConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get TLS config: %w", err) + } + } + + if obj.Spec.STS != nil && obj.Spec.STS.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.CertSecretRef.Name, + } + stsTLSConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get STS TLS config: %w", err) + } + } + + return 
&bucketCredentials{ + secret: secret, + proxyURL: proxyURL, + tlsConfig: tlsConfig, + stsSecret: stsSecret, + stsTLSConfig: stsTLSConfig, + }, nil +} + +// createBucketProvider creates a provider-specific bucket client using the given credentials and configuration. +// It handles different bucket providers (AWS, GCP, Azure, generic) and returns the appropriate client. +func (r *BucketReconciler) createBucketProvider(ctx context.Context, obj *sourcev1.Bucket, creds *bucketCredentials) (BucketProvider, error) { + authOpts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + authOpts = append(authOpts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.BucketKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + authOpts = append(authOpts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if creds.proxyURL != nil { + authOpts = append(authOpts, auth.WithProxyURL(*creds.proxyURL)) + } + + if obj.Spec.Region != "" { + authOpts = append(authOpts, auth.WithSTSRegion(obj.Spec.Region)) + } + + if sts := obj.Spec.STS; sts != nil { + authOpts = append(authOpts, auth.WithSTSEndpoint(sts.Endpoint)) + } + + switch obj.Spec.Provider { + case sourcev1.BucketProviderGoogle: + var opts []gcp.Option + if creds.proxyURL != nil { + opts = append(opts, gcp.WithProxyURL(creds.proxyURL)) + } + + if creds.secret != nil { + if err := gcp.ValidateSecret(creds.secret); err != nil { + return nil, err + } + opts = append(opts, gcp.WithSecret(creds.secret)) + } else { + opts = append(opts, gcp.WithAuth(authOpts...)) + } + + return gcp.NewClient(ctx, obj, opts...) 
+ + case sourcev1.BucketProviderAzure: + if err := azure.ValidateSecret(creds.secret); err != nil { + return nil, err + } + var opts []azure.Option + if creds.secret != nil { + opts = append(opts, azure.WithSecret(creds.secret)) + } + if creds.proxyURL != nil { + opts = append(opts, azure.WithProxyURL(creds.proxyURL)) + } + opts = append(opts, azure.WithAuth(authOpts...)) + return azure.NewClient(ctx, obj, opts...) + + default: + if err := minio.ValidateSecret(creds.secret); err != nil { + return nil, err + } + if sts := obj.Spec.STS; sts != nil { + if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil { + return nil, serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason) + } + if _, err := url.Parse(sts.Endpoint); err != nil { + return nil, serror.NewStalling(fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err), sourcev1.URLInvalidReason) + } + if err := minio.ValidateSTSSecret(sts.Provider, creds.stsSecret); err != nil { + return nil, serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) + } + } + var opts []minio.Option + if creds.secret != nil { + opts = append(opts, minio.WithSecret(creds.secret)) + } else if obj.Spec.Provider == sourcev1.BucketProviderAmazon { + opts = append(opts, minio.WithAuth(authOpts...)) + } + if creds.tlsConfig != nil { + opts = append(opts, minio.WithTLSConfig(creds.tlsConfig)) + } + if creds.proxyURL != nil { + opts = append(opts, minio.WithProxyURL(creds.proxyURL)) + } + if creds.stsSecret != nil { + opts = append(opts, minio.WithSTSSecret(creds.stsSecret)) + } + if creds.stsTLSConfig != nil { + opts = append(opts, minio.WithSTSTLSConfig(creds.stsTLSConfig)) + } + return minio.NewClient(ctx, obj, opts...) + } +} + +// syncBucketArtifacts handles etag index retrieval and bucket object fetching. +// It fetches the etag index from the provider and downloads objects to the specified directory. +// Returns true if changes were detected and artifacts were updated. 
+func (r *BucketReconciler) syncBucketArtifacts(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, dir string) (bool, error) { + if err := fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { + return false, err + } + var changed bool + if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { + curRev := digest.Digest(artifact.Revision) + changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) + } + + // Fetch the bucket objects if required to. + if artifact := obj.GetArtifact(); artifact == nil || changed { + if err := fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { + return false, err + } + return true, nil + } + + return false, nil +} diff --git a/controllers/bucket_controller_fetch_test.go b/internal/controller/bucket_controller_fetch_test.go similarity index 85% rename from controllers/bucket_controller_fetch_test.go rename to internal/controller/bucket_controller_fetch_test.go index 0dfaa005a..707d645f3 100644 --- a/controllers/bucket_controller_fetch_test.go +++ b/internal/controller/bucket_controller_fetch_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( "context" @@ -24,10 +24,11 @@ import ( "testing" "time" - "gotest.tools/assert" + . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/index" ) type mockBucketObject struct { @@ -40,7 +41,7 @@ type mockBucketClient struct { objects map[string]mockBucketObject } -var mockNotFound = fmt.Errorf("not found") +var errMockNotFound = fmt.Errorf("not found") func (m mockBucketClient) BucketExists(_ context.Context, name string) (bool, error) { return name == m.bucketName, nil @@ -56,7 +57,7 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string } object, ok := m.objects[obj] if !ok { - return "", mockNotFound + return "", errMockNotFound } if err := os.WriteFile(path, []byte(object.data), os.FileMode(0660)); err != nil { return "", err @@ -65,10 +66,10 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string } func (m mockBucketClient) ObjectIsNotFound(e error) bool { - return e == mockNotFound + return e == errMockNotFound } -func (m mockBucketClient) VisitObjects(_ context.Context, _ string, f func(key, etag string) error) error { +func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f func(key, etag string) error) error { for key, obj := range m.objects { if err := f(key, obj.etag); err != nil { return err @@ -77,9 +78,7 @@ func (m mockBucketClient) VisitObjects(_ context.Context, _ string, f func(key, return nil } -func (m mockBucketClient) Close(_ context.Context) { - return -} +func (m mockBucketClient) Close(_ context.Context) {} func (m *mockBucketClient) addObject(key string, object mockBucketObject) { if m.objects == nil { @@ -88,8 +87,8 @@ func (m *mockBucketClient) addObject(key string, object mockBucketObject) { m.objects[key] = object } -func (m *mockBucketClient) objectsToEtagIndex() *etagIndex { - i := newEtagIndex() +func 
(m *mockBucketClient) objectsToDigestIndex() *index.Digester { + i := index.NewDigester() for k, v := range m.objects { i.Add(k, v.etag) } @@ -114,13 +113,14 @@ func Test_fetchEtagIndex(t *testing.T) { client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) - index := newEtagIndex() + index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { t.Fatal(err) } - assert.Equal(t, index.Len(), 3) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(3)) }) t.Run("an error while bucket does not exist", func(t *testing.T) { @@ -128,9 +128,11 @@ func Test_fetchEtagIndex(t *testing.T) { client := mockBucketClient{bucketName: "other-bucket-name"} - index := newEtagIndex() + index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) - assert.ErrorContains(t, err, "not found") + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("not found")) }) t.Run("filters with .sourceignore rules", func(t *testing.T) { @@ -141,7 +143,7 @@ func Test_fetchEtagIndex(t *testing.T) { client.addObject("foo.yaml", mockBucketObject{etag: "etag1", data: "foo.yaml"}) client.addObject("foo.txt", mockBucketObject{etag: "etag2", data: "foo.txt"}) - index := newEtagIndex() + index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { t.Fatal(err) @@ -154,7 +156,8 @@ func Test_fetchEtagIndex(t *testing.T) { if ok := index.Has("foo.txt"); ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to not exist")) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) }) t.Run("filters with ignore rules from object", func(t *testing.T) { @@ -168,7 +171,7 @@ func Test_fetchEtagIndex(t *testing.T) { bucket := bucket.DeepCopy() bucket.Spec.Ignore = &ignore - index 
:= newEtagIndex() + index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { t.Fatal(err) @@ -178,7 +181,8 @@ func Test_fetchEtagIndex(t *testing.T) { t.Error(err) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) if ok := index.Has("foo.txt"); !ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to exist")) } @@ -203,7 +207,7 @@ func Test_fetchFiles(t *testing.T) { client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) - index := client.objectsToEtagIndex() + index := client.objectsToDigestIndex() err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { @@ -225,7 +229,7 @@ func Test_fetchFiles(t *testing.T) { client := mockBucketClient{bucketName: bucketName, objects: map[string]mockBucketObject{}} client.objects["error"] = mockBucketObject{} - err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToEtagIndex(), tmp) + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToDigestIndex(), tmp) if err == nil { t.Fatal("expected error but got nil") } @@ -237,14 +241,15 @@ func Test_fetchFiles(t *testing.T) { client := mockBucketClient{bucketName: bucketName} client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag2"}) - index := newEtagIndex() + index := index.NewDigester() index.Add("foo.yaml", "etag1") err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag2") + g := NewWithT(t) + g.Expect(f).To(Equal("etag2")) }) t.Run("a disappeared index entry is removed from the index", func(t *testing.T) { @@ -253,7 +258,7 @@ func Test_fetchFiles(t *testing.T) { client := mockBucketClient{bucketName: bucketName} client.addObject("foo.yaml", 
mockBucketObject{data: "foo.yaml", etag: "etag1"}) - index := newEtagIndex() + index := index.NewDigester() index.Add("foo.yaml", "etag1") // Does not exist on server index.Add("bar.yaml", "etag2") @@ -263,8 +268,9 @@ func Test_fetchFiles(t *testing.T) { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag1") - assert.Check(t, !index.Has("bar.yaml")) + g := NewWithT(t) + g.Expect(f).To(Equal("etag1")) + g.Expect(index.Has("bar.yaml")).To(BeFalse()) }) t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) { @@ -276,7 +282,7 @@ func Test_fetchFiles(t *testing.T) { f := fmt.Sprintf("file-%d", i) client.addObject(f, mockBucketObject{etag: f, data: f}) } - index := client.objectsToEtagIndex() + index := client.objectsToDigestIndex() err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) if err != nil { diff --git a/internal/controller/bucket_controller_test.go b/internal/controller/bucket_controller_test.go new file mode 100644 index 000000000..00ed46cb7 --- /dev/null +++ b/internal/controller/bucket_controller_test.go @@ -0,0 +1,2007 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/index" + gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" + s3mock "github.com/fluxcd/source-controller/internal/mock/s3" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +// Environment variable to set the GCP Storage host for the GCP client. 
+const EnvGcpStorageHost = "STORAGE_EMULATOR_HOST" + +func TestBucketReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "bucket-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + bucket := &sourcev1.Bucket{} + bucket.Name = "test-bucket" + bucket.Namespace = namespaceName + bucket.Spec = sourcev1.BucketSpec{ + Interval: metav1.Duration{Duration: interval}, + BucketName: "foo", + Endpoint: "bar", + } + // Add a test finalizer to prevent the object from getting deleted. + bucket.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, bucket)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, bucket)).NotTo(HaveOccurred()) + + r := &BucketReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(bucket)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := s3mock.NewServer("test-bucket") + s3Server.Objects = []*s3mock.Object{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + origObj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. 
+ condns := &conditionscheck.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + uo, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(uo) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.Bucket, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: 
"notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return 
err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + defer func() { + 
g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) + }() + + r := &BucketReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.Bucket{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, index, "") + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileSource_generic(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3mock.Object + middleware http.Handler + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + }{ + { + name: "Reconciles generic source", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "Observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': 
secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields 'accesskey' and 'secretkey'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = 
&meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{}, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, 
"foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + }, + }, + { + name: "Observes non-existing sts.secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data for 'ldap' STS provider: required fields username, password"), + }, + }, + { + 
name: "Observes non-existing sts.certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, + { + name: "Observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + 
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes incompatible sts.provider", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "aws", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidSTSConfigurationReason, "STS provider 'aws' is not supported for 'generic' bucket provider"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.endpoint", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "something\t", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "failed to parse STS endpoint 'something\t': parse \"something\\t\": net/url: invalid control character in URL"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + 
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj 
*sourcev1.Bucket) { + ignore := "!ignored/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3mock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + }, + }, + { + name: "Up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + 
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 
'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + tmpDir := t.TempDir() + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3mock.Server + if tt.bucketName != "" { + server = s3mock.NewServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, index, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + 
g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcsmock.Object + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + disableObjectLevelWorkloadIdentity bool + }{ + { + name: "Reconciles GCS source", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = 
&meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" 
+ conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 1, + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 4, + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + Generation: 3, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "!ignored/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcsmock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 1, + }, + { + Key: 
"ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 2, + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + Generation: 4, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + }, + }, + { + name: "Up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + Generation: 2, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + Generation: 2, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level Workload 
Identity (no secret)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Controller-Level Workload Identity (no secret, no SA)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + // ServiceAccountName is intentionally not set (Controller-Level) + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level fails when feature gate disabled", +
bucketName: "dummy", + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + disableObjectLevelWorkloadIdentity: true, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + // Handle ObjectLevelWorkloadIdentity feature gate + if !tt.disableObjectLevelWorkloadIdentity { + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + } + + tmpDir := t.TempDir() + + // Test bucket object. 
+ obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: "gcp", + }, + } + + // Set up the mock GCP bucket server. + server := gcsmock.NewServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. + g.Expect(os.Setenv(EnvGcpStorageHost, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(EnvGcpStorageHost)).ToNot(HaveOccurred()) + }() + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, index, tmpDir) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + + g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes ArtifactInStorage=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Up-to-date artifact should not persist and update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + revision := index.Digest(intdigest.Canonical) + obj.Spec.Interval = metav1.Duration{Duration: interval} + // Incomplete artifact + obj.Status.Artifact = &meta.Artifact{Revision: revision.String()} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + // Still incomplete + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, 
meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + // Remove the given directory and create a file for the same + // path. 
+ t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + t.Expect(err).ToNot(HaveOccurred()) + t.Expect(f.Close()).ToNot(HaveOccurred()) + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "is not a directory"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + r := &BucketReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + tmpDir := t.TempDir() + index := index.NewDigester() + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, index, tmpDir) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(context.TODO(), sp, obj, index, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. 
Check artifacts only on successful + // reconcile. + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.Bucket) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: "positive conditions only", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + 
name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + APIVersion: sourcev1.GroupVersion.String(), + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "foo", + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithObjects(obj). + WithStatusSubresource(&sourcev1.Bucket{}). + Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(bucketReadyCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_notify(t *testing.T) { + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.Bucket) + newObjBeforeFunc func(obj *sourcev1.Bucket) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + }, + wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored artifact with 2 fetched files from", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact 
= &meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + BucketName: "test-bucket", + }, + } + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &BucketReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + index := index.NewDigester(index.WithIndex(map[string]string{ + "zzz": "qqq", + "bbb": "ddd", + })) + reconciler.notify(ctx, oldObj, newObj, index, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { + tests := []struct { + name string + bucketProvider string + stsConfig *sourcev1.BucketSTSSpec + err string + }{ + { + name: "gcp unsupported", + bucketProvider: "gcp", + stsConfig: &sourcev1.BucketSTSSpec{ + 
Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "azure unsupported", + bucketProvider: "azure", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "aws supported", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + }, + { + name: "invalid endpoint", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "test", + }, + err: "spec.sts.endpoint in body should match '^(http|https)://.*$'", + }, + { + name: "gcp can be created without STS config", + bucketProvider: "gcp", + }, + { + name: "azure can be created without STS config", + bucketProvider: "azure", + }, + { + name: "generic can be created without STS config", + bucketProvider: "generic", + }, + { + name: "aws can be created without STS config", + bucketProvider: "aws", + }, + { + name: "ldap unsupported for aws", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + err: "'aws' is the only supported STS provider for the 'aws' Bucket provider", + }, + { + name: "aws unsupported for generic", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "'ldap' is the only supported STS provider for the 'generic' Bucket provider", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: 
"aws", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may not use a secret or cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: tt.bucketProvider, + BucketName: "test", + Endpoint: "test", + Suspend: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + STS: tt.stsConfig, + }, + } + + err := testEnv.Create(ctx, obj) + if err == nil { + defer func() { + err := testEnv.Delete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + }() + } + + if tt.err != "" { + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go new file mode 100644 index 000000000..d9dcf88c1 --- /dev/null +++ b/internal/controller/common_test.go @@ -0,0 +1,146 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + "github.com/fluxcd/source-controller/internal/object" +) + +// waitForSourceDeletion is a generic test helper to wait for object deletion of +// any source kind. +func waitForSourceDeletion(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +// waitForSuspended is a generic test helper to wait for object to be suspended +// of any source kind. +func waitForSuspended(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + suspended, err := object.GetSuspend(obj) + if err != nil { + return false + } + return suspended == true + }, timeout).Should(BeTrue()) +} + +// waitForSourceReadyWithArtifact is a generic test helper to wait for an object +// to be ready of any source kind that have artifact in status when ready. 
+func waitForSourceReadyWithArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + waitForSourceReady(ctx, g, obj, true) +} + +// waitForSourceReadyWithoutArtifact is a generic test helper to wait for an object +// to be ready of any source kind that don't have artifact in status when ready. +func waitForSourceReadyWithoutArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + waitForSourceReady(ctx, g, obj, false) +} + +// waitForSourceReady is a generic test helper to wait for an object to be +// ready of any source kind. +func waitForSourceReady(ctx context.Context, g *WithT, obj conditions.Setter, withArtifact bool) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if withArtifact { + artifact, err := object.GetArtifact(obj) + if err != nil { + return false + } + if artifact == nil { + return false + } + } + if !conditions.IsReady(obj) { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + statusObservedGen, err := object.GetStatusObservedGeneration(obj) + if err != nil { + return false + } + return obj.GetGeneration() == readyCondition.ObservedGeneration && + obj.GetGeneration() == statusObservedGen + }, timeout).Should(BeTrue()) +} + +// testSuspendedObjectDeleteWithArtifact is a generic test helper to test if a +// suspended object can be deleted for objects that have artifact in status when +// ready. +func testSuspendedObjectDeleteWithArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + testSuspendedObjectDelete(ctx, g, obj, true) +} + +// testSuspendedObjectDeleteWithoutArtifact is a generic test helper to test if +// a suspended object can be deleted for objects that don't have artifact in +// status when ready. 
+func testSuspendedObjectDeleteWithoutArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + testSuspendedObjectDelete(ctx, g, obj, false) +} + +// testSuspendedObjectDelete is a generic test helper to test if a suspended +// object can be deleted. +func testSuspendedObjectDelete(ctx context.Context, g *WithT, obj conditions.Setter, withArtifact bool) { + g.THelper() + + // Create the object and wait for it to be ready. + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + waitForSourceReady(ctx, g, obj, withArtifact) + + // Suspend the object. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(object.SetSuspend(obj, true)).ToNot(HaveOccurred()) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + waitForSuspended(ctx, g, obj) + + // Delete the object. + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + waitForSourceDeletion(ctx, g, obj) +} diff --git a/internal/controller/gitrepository_controller.go b/internal/controller/gitrepository_controller.go new file mode 100644 index 000000000..1208c8ae0 --- /dev/null +++ b/internal/controller/gitrepository_controller.go @@ -0,0 +1,1361 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + "github.com/fluxcd/pkg/git/github" + "github.com/fluxcd/pkg/runtime/logger" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/go-git/go-git/v5/plumbing/transport" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/git/gogit" + "github.com/fluxcd/pkg/git/repository" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror 
"github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/features" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" +) + +// gitRepositoryReadyCondition contains the information required to summarize a +// v1.GitRepository Ready Condition. +var gitRepositoryReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// gitRepositoryFailConditions contains the conditions that represent a failure. +var gitRepositoryFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.StorageOperationFailedCondition, +} + +// getPatchOptions composes patch options based on the given parameters. +// It is used as the options used when patching an object. 
+func getPatchOptions(ownedConditions []string, controllerName string) []patch.Option { + return []patch.Option{ + patch.WithOwnedConditions{Conditions: ownedConditions}, + patch.WithFieldOwner(controllerName), + } +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// GitRepositoryReconciler reconciles a v1.GitRepository object. +type GitRepositoryReconciler struct { + client.Client + kuberecorder.EventRecorder + helper.Metrics + + Storage *storage.Storage + ControllerName string + TokenCache *cache.TokenCache + + requeueDependency time.Duration + features map[string]bool + + patchOptions []patch.Option +} + +type GitRepositoryReconcilerOptions struct { + DependencyRequeueInterval time.Duration + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// gitRepositoryReconcileFunc is the function type for all the +// v1.GitRepository (sub)reconcile functions. 
+type gitRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) + +func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) +} + +func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error { + r.patchOptions = getPatchOptions(gitRepositoryReadyCondition.Owned, r.ControllerName) + + r.requeueDependency = opts.DependencyRequeueInterval + + if r.features == nil { + r.features = features.FeatureGates() + } + + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.GitRepository{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), + )). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }). + Complete(r) +} + +func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + start := time.Now() + log := ctrl.LoggerFrom(ctx) + + // Fetch the GitRepository + obj := &sourcev1.GitRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the patch helper with the current version of the object. + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. 
+ defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(gitRepositoryReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.ErrorActionHandler, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record duration metrics. + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition + // between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil + return + } + + // Reconcile actual object + reconcilers := []gitRepositoryReconcileFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileInclude, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) + return +} + +// reconcile iterates through the gitRepositoryReconcileFunc tasks for the +// object. 
It returns early on the first call that returns
+// reconcile.ResultRequeue, or produces an error.
+func (r *GitRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher,
+	obj *sourcev1.GitRepository, reconcilers []gitRepositoryReconcileFunc) (sreconcile.Result, error) {
+	oldObj := obj.DeepCopy()
+
+	rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
+
+	// Capture the reconcile request annotation value (if any) so an explicit
+	// reconcile request can be detected below.
+	var recAtVal string
+	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
+		recAtVal = v
+	}
+
+	// Persist reconciling if generation differs or reconciliation is requested.
+	switch {
+	case obj.Generation != obj.Status.ObservedGeneration:
+		rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
+			"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
+		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
+			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
+		}
+	case recAtVal != obj.Status.GetLastHandledReconcileRequest():
+		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
+			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
+		}
+	}
+
+	// Create temp dir for Git clone
+	tmpDir, err := util.TempDirForObj("", obj)
+	if err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to create temporary working directory: %w", err),
+			sourcev1.DirCreationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		return sreconcile.ResultEmpty, e
+	}
+	defer func() {
+		if err = os.RemoveAll(tmpDir); err != nil {
+			ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
+		}
+	}()
+	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
+
+	// Run the sub-reconcilers and build the result of reconciliation.
+	var (
+		commit   git.Commit
+		includes artifactSet
+
+		res    sreconcile.Result
+		resErr error
+	)
+	for _, rec := range reconcilers {
+		recResult, err := rec(ctx, sp, obj, &commit, &includes, tmpDir)
+		// Exit immediately on ResultRequeue.
+		if recResult == sreconcile.ResultRequeue {
+			return sreconcile.ResultRequeue, nil
+		}
+		// If an error is received, prioritize the returned results because an
+		// error also means immediate requeue.
+		if err != nil {
+			resErr = err
+			res = recResult
+			break
+		}
+		// Prioritize requeue request in the result.
+		res = sreconcile.LowestRequeuingResult(res, recResult)
+	}
+
+	r.notify(ctx, oldObj, obj, commit, res, resErr)
+
+	return res, resErr
+}
+
+// notify emits notification related to the result of reconciliation.
+func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) {
+	// Notify successful reconciliation for new artifact, no-op reconciliation
+	// and recovery from any failure.
+	if r.shouldNotify(oldObj, newObj, res, resErr) {
+		annotations := map[string]string{
+			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
+			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey):   newObj.Status.Artifact.Digest,
+		}
+
+		// A partial commit due to no-op clone doesn't contain the commit
+		// message information. Have separate message for it.
+		var message string
+		if git.IsConcreteCommit(commit) {
+			message = fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage())
+		} else {
+			message = fmt.Sprintf("stored artifact for commit '%s'", commitReference(newObj, &commit))
+		}
+
+		// Notify on new artifact and failure recovery.
+		if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
+			r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
+				"NewArtifact", message)
+			ctrl.LoggerFrom(ctx).Info(message)
+		} else {
+			if sreconcile.FailureRecovery(oldObj, newObj, gitRepositoryFailConditions) {
+				r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
+					meta.SucceededReason, message)
+				ctrl.LoggerFrom(ctx).Info(message)
+			}
+		}
+	}
+}
+
+// shouldNotify analyzes the result of subreconcilers and determines if a
+// notification should be sent. It decides about the final informational
+// notifications after the reconciliation. Failure notification and in-line
+// notifications are not handled here.
+func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepository, res sreconcile.Result, resErr error) bool {
+	// Notify for successful reconciliation.
+	if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
+		return true
+	}
+	// Notify for no-op reconciliation with ignore error.
+	if resErr != nil && res == sreconcile.ResultEmpty && newObj.Status.Artifact != nil {
+		// Convert to Generic error and check for ignore.
+		if ge, ok := resErr.(*serror.Generic); ok {
+			return ge.Ignore
+		}
+	}
+	return false
+}
+
+// reconcileStorage ensures the current state of the storage matches the
+// desired and previously observed state.
+//
+// The garbage collection is executed based on the flag configured settings and
+// may remove files that are beyond their TTL or the maximum number of files
+// to survive a collection cycle.
+// If the Artifact in the Status of the object disappeared from the Storage,
+// it is removed from the object.
+// If the object does not have an Artifact in its Status, a Reconciling
+// condition is added.
+// The hostname of the Artifact in the Status of the object is updated, to
+// ensure it matches the Storage server hostname of current runtime.
+func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
+	obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) {
+	// Garbage collect previous advertised artifact(s) from storage
+	// NOTE: the returned error is intentionally discarded; garbage collection
+	// is best-effort here and must not block storage reconciliation.
+	_ = r.garbageCollect(ctx, obj)
+
+	var artifactMissing bool
+	if artifact := obj.GetArtifact(); artifact != nil {
+		// Determine if the advertised artifact is still in storage
+		if !r.Storage.ArtifactExist(*artifact) {
+			artifactMissing = true
+		}
+
+		// If the artifact is in storage, verify if the advertised digest still
+		// matches the actual artifact
+		if !artifactMissing {
+			if err := r.Storage.VerifyArtifact(*artifact); err != nil {
+				r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())
+
+				if err = r.Storage.Remove(*artifact); err != nil {
+					return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
+				}
+
+				artifactMissing = true
+			}
+		}
+
+		// If the artifact is missing, remove it from the object
+		if artifactMissing {
+			obj.Status.Artifact = nil
+		}
+	}
+
+	// Record that we do not have an artifact
+	if obj.GetArtifact() == nil {
+		msg := "building artifact"
+		if artifactMissing {
+			msg += ": disappeared from storage"
+		}
+		rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
+		conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
+		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
+			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
+		}
+		return sreconcile.ResultSuccess, nil
+	}
+
+	// Always update URLs to ensure hostname is up-to-date
+	// TODO(hidde): we may want to send out an event only if we notice the URL has changed
+	r.Storage.SetArtifactURL(obj.GetArtifact())
+
+	return sreconcile.ResultSuccess, nil
+}
+
+// reconcileSource ensures the upstream Git repository and reference can be
+// cloned and checked out using the specified configuration, and observes its
+// state. It also checks if the included repositories are available for use.
+//
+// The included repositories are fetched and their metadata are stored. In case
+// one of the included repositories isn't ready, it records
+// v1.IncludeUnavailableCondition=True and returns early. When all the
+// included repositories are ready, it removes
+// v1.IncludeUnavailableCondition from the object.
+// When the included artifactSet differs from the current set in the Status of
+// the object, it marks the object with v1.ArtifactOutdatedCondition=True.
+// The repository is cloned to the given dir, using the specified configuration
+// to check out the reference. In case of an error during this process
+// (including transient errors), it records v1.FetchFailedCondition=True
+// and returns early.
+// On a successful checkout, it removes v1.FetchFailedCondition and
+// compares the current revision of HEAD to the revision of the Artifact in the
+// Status of the object. It records v1.ArtifactOutdatedCondition=True when
+// they differ.
+// If specified, the signature of the Git commit is verified. If the signature
+// can not be verified or the verification fails, it records
+// v1.SourceVerifiedCondition=False and returns early. When successful,
+// it records v1.SourceVerifiedCondition=True.
+// When all the above is successful, the given Commit pointer is set to the
+// commit of the checked out Git repository.
+//
+// If the optimized git clone feature is enabled, it checks if the remote repo
+// and the local artifact are on the same revision, and no other source content
+// related configurations have changed since last reconciliation. If there's a
+// change, it short-circuits the whole reconciliation with an early return.
+func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + // Remove previously failed source verification status conditions. The + // failing verification should be recalculated. But an existing successful + // verification need not be removed as it indicates verification of previous + // version. + if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } + + var proxyOpts *transport.ProxyOptions + var proxyURL *url.URL + if obj.Spec.ProxySecretRef != nil { + var err error + secretRef := types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to configure proxy options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e + } + proxyOpts = &transport.ProxyOptions{URL: proxyURL.String()} + } + + u, err := url.Parse(obj.Spec.URL) + if err != nil { + e := serror.NewStalling( + fmt.Errorf("failed to parse url '%s': %w", obj.Spec.URL, err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + authOpts, err := r.getAuthOpts(ctx, obj, *u, proxyURL) + if err != nil { + // Return error as the world as observed may change + return sreconcile.ResultEmpty, err + } + + // Fetch the included artifact metadata. 
+ artifacts, err := r.fetchIncludes(ctx, obj) + if err != nil { + return sreconcile.ResultEmpty, err + } + + // Observe if the artifacts still match the previous included ones + if artifacts.Diff(obj.Status.IncludedArtifacts) { + message := "included artifacts differ from last observed includes" + if obj.Status.IncludedArtifacts != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + + // Persist the ArtifactSet. + *includes = *artifacts + + c, err := r.gitCheckout(ctx, obj, authOpts, proxyOpts, dir, true) + if err != nil { + return sreconcile.ResultEmpty, err + } + if c == nil { + e := serror.NewGeneric( + fmt.Errorf("git repository is empty"), + "EmptyGitRepository", + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Assign the commit to the shared commit reference. + *commit = *c + + // If it's a partial commit obtained from an existing artifact, check if the + // reconciliation can be skipped if other configurations have not changed. + if !git.IsConcreteCommit(*commit) { + // Check if the content config contributing to the artifact has changed. + if !gitContentConfigChanged(obj, includes) { + ge := serror.NewGeneric( + fmt.Errorf("no changes since last reconcilation: observed revision '%s'", + commitReference(obj, commit)), sourcev1.GitOperationSucceedReason, + ) + ge.Notification = false + ge.Ignore = true + // Log it as this will not be passed to the runtime. + ge.Log = true + ge.Event = corev1.EventTypeNormal + // Remove any stale fetch failed condition. 
+ conditions.Delete(obj, sourcev1.FetchFailedCondition) + // IMPORTANT: This must be set to ensure that the observed + // generation of this condition is updated. In case of full + // reconciliation reconcileArtifact() ensures that it's set at the + // very end. + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, + "stored artifact for revision '%s'", commitReference(obj, commit)) + // TODO: Find out if such condition setting is needed when commit + // signature verification is enabled. + return sreconcile.ResultEmpty, ge + } + + // If we can't skip the reconciliation, checkout again without any + // optimization. + c, err := r.gitCheckout(ctx, obj, authOpts, proxyOpts, dir, false) + if err != nil { + return sreconcile.ResultEmpty, err + } + *commit = *c + } + ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commitReference(obj, commit)) + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Validate sparse checkout paths after successful checkout. 
+ if err := r.validateSparseCheckoutPaths(ctx, obj, dir); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to sparse checkout directories : %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Verify commit signature + if result, err := r.verifySignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { + return result, err + } + + // Mark observations about the revision on the object + if !obj.GetArtifact().HasRevision(commitReference(obj, commit)) { + message := fmt.Sprintf("new upstream revision '%s'", commitReference(obj, commit)) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + return sreconcile.ResultSuccess, nil +} + +// getAuthOpts fetches the secret containing the auth options (if specified), +// constructs a git.AuthOptions object using those options along with the provided +// URL and returns it. 
+func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository,
+	u url.URL, proxyURL *url.URL) (*git.AuthOptions, error) {
+	var secret *corev1.Secret
+	var authData map[string][]byte
+	if obj.Spec.SecretRef != nil {
+		var err error
+		secret, err = r.getSecret(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace())
+		if err != nil {
+			e := serror.NewGeneric(
+				fmt.Errorf("failed to get secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err),
+				sourcev1.AuthenticationFailedReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+		authData = secret.Data
+	}
+
+	// Configure authentication strategy to access the source
+	opts, err := git.NewAuthOptions(u, authData)
+	if err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to configure authentication options: %w", err),
+			sourcev1.AuthenticationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+		return nil, e
+	}
+
+	// Configure provider authentication if specified.
+	// getCreds stays nil when no provider-specific credential flow applies.
+	var getCreds func() (*authutils.GitCredentials, error)
+	switch provider := obj.GetProvider(); provider {
+	case sourcev1.GitProviderAzure: // If AWS or GCP are added in the future they can be added here separated by a comma.
+		getCreds = func() (*authutils.GitCredentials, error) {
+			opts := []auth.Option{
+				auth.WithClient(r.Client),
+				auth.WithServiceAccountNamespace(obj.GetNamespace()),
+			}
+
+			if obj.Spec.ServiceAccountName != "" {
+				// Check object-level workload identity feature gate.
+				if !auth.IsObjectLevelWorkloadIdentityEnabled() {
+					const gate = auth.FeatureGateObjectLevelWorkloadIdentity
+					const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller"
+					err := serror.NewStalling(fmt.Errorf(msgFmt, gate), meta.FeatureGateDisabledReason)
+					conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "%s", err)
+					return nil, err
+				}
+				// Set ServiceAccountName only if explicitly specified
+				opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName))
+			}
+
+			if r.TokenCache != nil {
+				involvedObject := cache.InvolvedObject{
+					Kind:      sourcev1.GitRepositoryKind,
+					Name:      obj.GetName(),
+					Namespace: obj.GetNamespace(),
+					Operation: cache.OperationReconcile,
+				}
+				opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject))
+			}
+
+			if proxyURL != nil {
+				opts = append(opts, auth.WithProxyURL(*proxyURL))
+			}
+
+			return authutils.GetGitCredentials(ctx, provider, opts...)
+		}
+	case sourcev1.GitProviderGitHub:
+		// if provider is github, but secret ref is not specified
+		if obj.Spec.SecretRef == nil {
+			e := serror.NewStalling(
+				fmt.Errorf("secretRef with github app data must be specified when provider is set to github"),
+				sourcev1.InvalidProviderConfigurationReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+		authMethods, err := secrets.AuthMethodsFromSecret(ctx, secret, secrets.WithTLSSystemCertPool())
+		if err != nil {
+			return nil, err
+		}
+		if !authMethods.HasGitHubAppData() {
+			e := serror.NewGeneric(
+				fmt.Errorf("secretRef with github app data must be specified when provider is set to github"),
+				sourcev1.InvalidProviderConfigurationReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+		getCreds = func() (*authutils.GitCredentials, error) {
+			var appOpts []github.OptFunc
+
+			appOpts = append(appOpts, github.WithAppData(authMethods.GitHubAppData))
+
+			if proxyURL != nil {
+				appOpts = append(appOpts, github.WithProxyURL(proxyURL))
+			}
+
+			if r.TokenCache != nil {
+				appOpts = append(appOpts, github.WithCache(r.TokenCache, sourcev1.GitRepositoryKind,
+					obj.GetName(), obj.GetNamespace(), cache.OperationReconcile))
+			}
+
+			if authMethods.HasTLS() {
+				appOpts = append(appOpts, github.WithTLSConfig(authMethods.TLS))
+			}
+
+			username, password, err := github.GetCredentials(ctx, appOpts...)
+			if err != nil {
+				return nil, err
+			}
+			return &authutils.GitCredentials{
+				Username: username,
+				Password: password,
+			}, nil
+		}
+	default:
+		// analyze secret, if it has github app data, perhaps provider should have been github.
+		if appID := authData[github.KeyAppID]; len(appID) != 0 {
+			e := serror.NewGeneric(
+				fmt.Errorf("secretRef '%s/%s' has github app data but provider is not set to github", obj.GetNamespace(), obj.Spec.SecretRef.Name),
+				sourcev1.InvalidProviderConfigurationReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+	}
+	if getCreds != nil {
+		creds, err := getCreds()
+		if err != nil {
+			// Check if it's already a structured error and preserve it
+			switch err.(type) {
+			case *serror.Stalling, *serror.Generic:
+				return nil, err
+			}
+
+			e := serror.NewGeneric(
+				fmt.Errorf("failed to configure authentication options: %w", err),
+				sourcev1.AuthenticationFailedReason,
+			)
+			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+		opts.BearerToken = creds.BearerToken
+		opts.Username = creds.Username
+		opts.Password = creds.Password
+	}
+	return opts, nil
+}
+
+// getSecret fetches the Secret with the given name from the given namespace
+// using the reconciler's client, wrapping any retrieval error with context.
+func (r *GitRepositoryReconciler) getSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) {
+	key := types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	}
+	secret := &corev1.Secret{}
+	if err := r.Client.Get(ctx, key, secret); err != nil {
+		return nil, fmt.Errorf("failed to get secret '%s/%s': %w", namespace, name, err)
+	}
+	return secret, nil
+}
+
+// reconcileArtifact archives a new Artifact to the Storage, if the current
+// (Status) data on the object does not match the given.
+//
+// The inspection of the given data to the object is differed, ensuring any
+// stale observations like v1.ArtifactOutdatedCondition are removed.
+// If the given Artifact and/or artifactSet (includes) and observed artifact
+// content config do not differ from the object's current, it returns early.
+// Source ignore patterns are loaded, and the given directory is archived while
+// taking these patterns into account.
+// On a successful archive, the Artifact, Includes, observed ignore, recurse
+// submodules and observed include in the Status of the object are set.
+func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
+	obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
+
+	// Create potential new artifact with current available metadata
+	artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commitReference(obj, commit), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
+
+	// Set the ArtifactInStorageCondition if there's no drift.
+	defer func() {
+		if curArtifact := obj.GetArtifact(); curArtifact.HasRevision(artifact.Revision) &&
+			!includes.Diff(obj.Status.IncludedArtifacts) &&
+			!gitContentConfigChanged(obj, includes) {
+			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
+			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
+				"stored artifact for revision '%s'", curArtifact.Revision)
+		}
+	}()
+
+	// The artifact is up-to-date
+	if curArtifact := obj.GetArtifact(); curArtifact.HasRevision(artifact.Revision) &&
+		!includes.Diff(obj.Status.IncludedArtifacts) &&
+		!gitContentConfigChanged(obj, includes) {
+		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", curArtifact.Revision)
+		return sreconcile.ResultSuccess, nil
+	}
+
+	// Ensure target path exists and is a directory
+	if f, err := os.Stat(dir); err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to stat target artifact path: %w", err),
+			sourcev1.StatOperationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		return sreconcile.ResultEmpty, e
+	} else if !f.IsDir() {
+		e := serror.NewGeneric(
+			fmt.Errorf("invalid target path: '%s' is not a directory", dir),
+			sourcev1.InvalidPathReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		return sreconcile.ResultEmpty, e
+	}
+
+	// Ensure artifact directory exists and acquire lock
+	if err := r.Storage.MkdirAll(artifact); err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to create artifact directory: %w", err),
+			sourcev1.DirCreationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		return sreconcile.ResultEmpty, e
+	}
+	unlock, err := r.Storage.Lock(artifact)
+	if err != nil {
+		return sreconcile.ResultEmpty, serror.NewGeneric(
+			fmt.Errorf("failed to acquire lock for artifact: %w", err),
+			meta.FailedReason,
+		)
+	}
+	defer unlock()
+
+	// Load ignore rules for archiving
+	ignoreDomain := strings.Split(dir, string(filepath.Separator))
+	ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
+	if err != nil {
+		return sreconcile.ResultEmpty, serror.NewGeneric(
+			fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
+			"SourceIgnoreError",
+		)
+	}
+	if obj.Spec.Ignore != nil {
+		ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
+	}
+
+	// Archive directory to storage
+	if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("unable to archive artifact to storage: %w", err),
+			sourcev1.ArchiveOperationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		return sreconcile.ResultEmpty, e
+	}
+
+	// Record the observations on the object.
+	obj.Status.Artifact = artifact.DeepCopy()
+	obj.Status.IncludedArtifacts = *includes
+	obj.Status.ObservedIgnore = obj.Spec.Ignore
+	obj.Status.ObservedRecurseSubmodules = obj.Spec.RecurseSubmodules
+	obj.Status.ObservedInclude = obj.Spec.Include
+	obj.Status.ObservedSparseCheckout = obj.Spec.SparseCheckout
+
+	// Remove the deprecated symlink.
+	// TODO(hidde): remove 2 minor versions from introduction of v1.
+	symArtifact := artifact.DeepCopy()
+	symArtifact.Path = filepath.Join(filepath.Dir(symArtifact.Path), "latest.tar.gz")
+	if fi, err := os.Lstat(r.Storage.LocalPath(artifact)); err == nil {
+		if fi.Mode()&os.ModeSymlink != 0 {
+			if err := os.Remove(r.Storage.LocalPath(*symArtifact)); err != nil {
+				r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
+					"failed to remove (deprecated) symlink: %s", err)
+			}
+		}
+	}
+
+	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
+	return sreconcile.ResultSuccess, nil
+}
+
+// reconcileInclude reconciles the on the object specified
+// v1.GitRepositoryInclude list by copying their Artifact (sub)contents to
+// the specified paths in the given directory.
+//
+// When one of the includes is unavailable, it marks the object with
+// v1.IncludeUnavailableCondition=True and returns early.
+// When the copy operations are successful, it removes the
+// v1.IncludeUnavailableCondition from the object.
+// When the composed artifactSet differs from the current set in the Status of
+// the object, it marks the object with v1.ArtifactOutdatedCondition=True.
+func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patch.SerialPatcher,
+	obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
+
+	for i, incl := range obj.Spec.Include {
+		// Do this first as it is much cheaper than copy operations
+		toPath, err := securejoin.SecureJoin(dir, incl.GetToPath())
+		if err != nil {
+			e := serror.NewGeneric(
+				fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
+				"IllegalPath",
+			)
+			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+			return sreconcile.ResultEmpty, e
+		}
+
+		// Get artifact at the same include index. The artifactSet is created
+		// such that the index of artifactSet matches with the index of Include.
+		// Hence, index is used here to pick the associated artifact from
+		// includes.
+		// NOTE(review): fetchIncludes builds the set index-aligned with
+		// Spec.Include, so this linear scan is equivalent to a direct index
+		// lookup.
+		var artifact *meta.Artifact
+		for j, art := range *includes {
+			if i == j {
+				artifact = art
+			}
+		}
+
+		// Copy artifact (sub)contents to configured directory.
+		if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil {
+			e := serror.NewGeneric(
+				fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
+				"CopyFailure",
+			)
+			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+			return sreconcile.ResultEmpty, e
+		}
+	}
+	conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
+	return sreconcile.ResultSuccess, nil
+}
+
+// gitCheckout builds checkout options with the given configurations and
+// performs a git checkout.
+func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1.GitRepository,
+	authOpts *git.AuthOptions, proxyOpts *transport.ProxyOptions, dir string, optimized bool) (*git.Commit, error) {
+
+	// Configure checkout strategy.
+	cloneOpts := repository.CloneConfig{
+		RecurseSubmodules: obj.Spec.RecurseSubmodules,
+		ShallowClone:      true,
+	}
+	if ref := obj.Spec.Reference; ref != nil {
+		cloneOpts.Branch = ref.Branch
+		cloneOpts.Commit = ref.Commit
+		cloneOpts.Tag = ref.Tag
+		cloneOpts.SemVer = ref.SemVer
+		cloneOpts.RefName = ref.Name
+	}
+	if obj.Spec.SparseCheckout != nil {
+		// Trim any leading "./" in the directory paths since underlying go-git API does not honor them.
+		sparseCheckoutDirs := make([]string, len(obj.Spec.SparseCheckout))
+		for i, path := range obj.Spec.SparseCheckout {
+			sparseCheckoutDirs[i] = strings.TrimPrefix(path, "./")
+		}
+		cloneOpts.SparseCheckoutDirectories = sparseCheckoutDirs
+	}
+	// Only if the object has an existing artifact in storage, attempt to
+	// short-circuit clone operation. reconcileStorage has already verified
+	// that the artifact exists.
+	if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) {
+		if artifact := obj.GetArtifact(); artifact != nil {
+			cloneOpts.LastObservedCommit = artifact.Revision
+		}
+	}
+
+	gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
+	defer cancel()
+
+	clientOpts := []gogit.ClientOption{gogit.WithDiskStorage()}
+	if authOpts.Transport == git.HTTP {
+		clientOpts = append(clientOpts, gogit.WithInsecureCredentialsOverHTTP())
+	}
+	if proxyOpts != nil {
+		clientOpts = append(clientOpts, gogit.WithProxy(*proxyOpts))
+	}
+
+	gitReader, err := gogit.NewClient(dir, authOpts, clientOpts...)
+	if err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to create Git client: %w", err),
+			sourcev1.GitOperationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+		return nil, e
+	}
+	defer gitReader.Close()
+
+	commit, err := gitReader.Clone(gitCtx, obj.Spec.URL, cloneOpts)
+	if err != nil {
+		e := serror.NewGeneric(
+			fmt.Errorf("failed to checkout and determine revision: %w", err),
+			sourcev1.GitOperationFailedReason,
+		)
+		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
+		return nil, e
+	}
+
+	return commit, nil
+}
+
+// fetchIncludes fetches artifact metadata of all the included repos.
+func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) {
+	artifacts := make(artifactSet, len(obj.Spec.Include))
+	for i, incl := range obj.Spec.Include {
+		// Retrieve the included GitRepository.
+		dep := &sourcev1.GitRepository{}
+		if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
+			e := serror.NewWaiting(
+				fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
+				"NotFound",
+			)
+			e.RequeueAfter = r.requeueDependency
+			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+
+		// Confirm include has an artifact
+		if dep.GetArtifact() == nil {
+			e := serror.NewWaiting(
+				fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
+				"NoArtifact",
+			)
+			e.RequeueAfter = r.requeueDependency
+			conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e)
+			return nil, e
+		}
+
+		artifacts[i] = dep.GetArtifact().DeepCopy()
+	}
+
+	// We now know all the includes are available.
+	conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
+
+	return &artifacts, nil
+}
+
+// verifySignature verifies the signature of the given Git commit and/or its referencing tag
+// depending on the verification mode specified on the object.
+// If the signature can not be verified or the verification fails, it records
+// v1.SourceVerifiedCondition=False and returns.
+// When successful, it records v1.SourceVerifiedCondition=True.
+// If no verification mode is specified on the object, the
+// v1.SourceVerifiedCondition Condition is removed.
+func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { + // Check if there is a commit verification is configured and remove any old + // observations if there is none + if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { + obj.Status.SourceVerificationMode = nil + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + return sreconcile.ResultSuccess, nil + } + + // Get secret with GPG data + publicKeySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: obj.Spec.Verification.SecretRef.Name, + } + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { + e := serror.NewGeneric( + fmt.Errorf("PGP public keys secret error: %w", err), + "VerificationError", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + var keyRings []string + for _, v := range secret.Data { + keyRings = append(keyRings, string(v)) + } + + var message strings.Builder + if obj.Spec.Verification.VerifyTag() { + // If we need to verify a tag object, then the commit must have a tag + // that points to it. If it does not, then its safe to asssume that + // the checkout didn't happen via a tag reference, thus the object can + // be marked as stalled. + tag := commit.ReferencingTag + if tag == nil { + err := serror.NewStalling( + errors.New("cannot verify tag object's signature if a tag reference is not specified"), + "InvalidVerificationMode", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) + return sreconcile.ResultEmpty, err + } + if !git.IsSignedTag(*tag) { + // If the tag was not signed then we can't verify its signature + // but since the upstream tag object can change at any time, we can't + // mark the object as stalled. 
+ err := serror.NewGeneric( + fmt.Errorf("cannot verify signature of tag '%s' since it is not signed", commit.ReferencingTag.String()), + "InvalidGitObject", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) + return sreconcile.ResultEmpty, err + } + + // Verify tag with GPG data from secret + tagEntity, err := tag.Verify(keyRings...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("signature verification of tag '%s' failed: %w", tag.String(), err), + "InvalidTagSignature", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + // Return error in the hope the secret changes + return sreconcile.ResultEmpty, e + } + + message.WriteString(fmt.Sprintf("verified signature of\n\t- tag '%s' with key '%s'", tag.String(), tagEntity)) + } + + if obj.Spec.Verification.VerifyHEAD() { + // Verify commit with GPG data from secret + headEntity, err := commit.Verify(keyRings...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), + "InvalidCommitSignature", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + // Return error in the hope the secret changes + return sreconcile.ResultEmpty, e + } + // If we also verified the tag previously, then append to the message. 
+ if message.Len() > 0 { + message.WriteString(fmt.Sprintf("\n\t- commit '%s' with key '%s'", commit.Hash.String(), headEntity)) + } else { + message.WriteString(fmt.Sprintf("verified signature of\n\t- commit '%s' with key '%s'", commit.Hash.String(), headEntity)) + } + } + + reason := meta.SucceededReason + mode := obj.Spec.Verification.GetMode() + obj.Status.SourceVerificationMode = &mode + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, reason, "%s", message.String()) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, "%s", message.String()) + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. +// Removing the finalizer from the object if successful. +func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. +// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. 
+func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// gitContentConfigChanged evaluates the current spec with the observations of +// the artifact in the status to determine if artifact content configuration has +// changed and requires rebuilding the artifact. 
Rebuilding the artifact is also +// required if the object needs to be (re)verified. +func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) bool { + if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { + return true + } + if obj.Spec.RecurseSubmodules != obj.Status.ObservedRecurseSubmodules { + return true + } + if len(obj.Spec.Include) != len(obj.Status.ObservedInclude) { + return true + } + if requiresVerification(obj) { + return true + } + if len(obj.Spec.SparseCheckout) != len(obj.Status.ObservedSparseCheckout) { + return true + } + for index, dir := range obj.Spec.SparseCheckout { + if dir != obj.Status.ObservedSparseCheckout[index] { + return true + } + } + + // Convert artifactSet to index addressable artifacts and ensure that it and + // the included artifacts include all the include from the spec. + artifacts := []*meta.Artifact(*includes) + if len(obj.Spec.Include) != len(artifacts) { + return true + } + if len(obj.Spec.Include) != len(obj.Status.IncludedArtifacts) { + return true + } + + // The order of spec.include, status.IncludeArtifacts and + // status.observedInclude are the same. Compare the values by index. + for index, incl := range obj.Spec.Include { + observedIncl := obj.Status.ObservedInclude[index] + observedInclArtifact := obj.Status.IncludedArtifacts[index] + currentIncl := artifacts[index] + + // Check if include is the same in spec and status. + if !gitRepositoryIncludeEqual(incl, observedIncl) { + return true + } + + // Check if the included repositories are still the same. + if !observedInclArtifact.HasRevision(currentIncl.Revision) { + return true + } + if !observedInclArtifact.HasDigest(currentIncl.Digest) { + return true + } + } + return false +} + +// validateSparseCheckoutPaths checks if the sparse checkout paths exist in the cloned repository. 
+func (r *GitRepositoryReconciler) validateSparseCheckoutPaths(ctx context.Context, obj *sourcev1.GitRepository, dir string) error { + if obj.Spec.SparseCheckout != nil { + for _, path := range obj.Spec.SparseCheckout { + fullPath := filepath.Join(dir, path) + if _, err := os.Lstat(fullPath); err != nil { + return fmt.Errorf("sparse checkout dir '%s' does not exist in repository: %w", path, err) + } + } + } + return nil +} + +// Returns true if both GitRepositoryIncludes are equal. +func gitRepositoryIncludeEqual(a, b sourcev1.GitRepositoryInclude) bool { + if a.GitRepositoryRef != b.GitRepositoryRef { + return false + } + if a.FromPath != b.FromPath { + return false + } + if a.ToPath != b.ToPath { + return false + } + return true +} + +func commitReference(obj *sourcev1.GitRepository, commit *git.Commit) string { + if obj.Spec.Reference != nil && obj.Spec.Reference.Name != "" { + return commit.AbsoluteReference() + } + return commit.String() +} + +// requiresVerification inspects a GitRepository's verification spec and its status +// to determine whether the Git repository needs to be verified again. It does so by +// first checking if the GitRepository has a verification spec. If it does, then +// it returns true based on the following three conditions: +// +// - If the object does not have a observed verification mode in its status. +// - If the observed verification mode indicates that only the tag had been +// verified earlier and the HEAD also needs to be verified now. +// - If the observed verification mode indicates that only the HEAD had been +// verified earlier and the tag also needs to be verified now. 
+func requiresVerification(obj *sourcev1.GitRepository) bool { + if obj.Spec.Verification != nil { + observedMode := obj.Status.SourceVerificationMode + mode := obj.Spec.Verification.GetMode() + if observedMode == nil { + return true + } + if (*observedMode == sourcev1.ModeGitTag && (mode == sourcev1.ModeGitHEAD || mode == sourcev1.ModeGitTagAndHEAD)) || + (*observedMode == sourcev1.ModeGitHEAD && (mode == sourcev1.ModeGitTag || mode == sourcev1.ModeGitTagAndHEAD)) { + return true + } + } + return false +} diff --git a/tests/fuzz/gitrepository_fuzzer.go b/internal/controller/gitrepository_controller_fuzz_test.go similarity index 75% rename from tests/fuzz/gitrepository_fuzzer.go rename to internal/controller/gitrepository_controller_fuzz_test.go index 0c495930a..c9c136820 100644 --- a/tests/fuzz/gitrepository_fuzzer.go +++ b/internal/controller/gitrepository_controller_fuzz_test.go @@ -1,5 +1,5 @@ -//go:build gofuzz -// +build gofuzz +//go:build gofuzz_libfuzzer +// +build gofuzz_libfuzzer /* Copyright 2022 The Flux authors @@ -17,7 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package controller import ( "context" @@ -35,6 +35,7 @@ import ( "path/filepath" "strings" "sync" + "testing" "time" fuzz "github.com/AdaLogics/go-fuzz-headers" @@ -58,10 +59,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + intstorage "github.com/fluxcd/pkg/artifact/digest" "github.com/fluxcd/pkg/gittestserver" + "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/testenv" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/fluxcd/source-controller/controllers" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -75,7 +78,7 @@ var ( cfg *rest.Config testEnv *testenv.Environment - storage *controllers.Storage + storage *intstorage.Storage examplePublicKey []byte examplePrivateKey []byte @@ -87,277 +90,140 @@ var ( var testFiles embed.FS const ( - defaultBinVersion = "1.23" + defaultBinVersion = "1.24" lettersAndNumbers = "abcdefghijklmnopqrstuvwxyz123456789" lettersNumbersAndDash = "abcdefghijklmnopqrstuvwxyz123456789-" ) -func envtestBinVersion() string { - if binVersion := os.Getenv("ENVTEST_BIN_VERSION"); binVersion != "" { - return binVersion - } - return defaultBinVersion -} - -func ensureDependencies() error { - if _, err := os.Stat("/.dockerenv"); os.IsNotExist(err) { - return nil - } +// FuzzRandomGitFiles implements a fuzzer that +// targets the GitRepository reconciler. 
+func FuzzRandomGitFiles(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + initter.Do(func() { + utilruntime.Must(ensureDependencies()) + }) + + f := fuzz.NewConsumer(data) + namespace, deleteNamespace, err := createNamespace(f) + if err != nil { + return + } + defer deleteNamespace() - if os.Getenv("KUBEBUILDER_ASSETS") == "" { - binVersion := envtestBinVersion() - cmd := exec.Command("/usr/bin/bash", "-c", fmt.Sprintf(`go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \ - /root/go/bin/setup-envtest use -p path %s`, binVersion)) + gitServerURL, stopGitServer := createGitServer(f) + defer stopGitServer() - cmd.Env = append(os.Environ(), "GOPATH=/root/go") - assetsPath, err := cmd.Output() + fs := memfs.New() + gitrepo, err := git.Init(memory.NewStorage(), fs) if err != nil { - return err + panic(err) } - os.Setenv("KUBEBUILDER_ASSETS", string(assetsPath)) - } - - // Output all embedded testdata files - embedDirs := []string{"testdata/crd", "testdata/certs"} - for _, dir := range embedDirs { - err := os.MkdirAll(dir, 0o700) + wt, err := gitrepo.Worktree() if err != nil { - return fmt.Errorf("mkdir %s: %v", dir, err) + panic(err) } - templates, err := fs.ReadDir(testFiles, dir) + // Create random files for the git source + err = createRandomFiles(f, fs, wt) if err != nil { - return fmt.Errorf("reading embedded dir: %v", err) + return } - for _, template := range templates { - fileName := fmt.Sprintf("%s/%s", dir, template.Name()) - fmt.Println(fileName) - - data, err := testFiles.ReadFile(fileName) - if err != nil { - return fmt.Errorf("reading embedded file %s: %v", fileName, err) - } - - os.WriteFile(fileName, data, 0o600) - if err != nil { - return fmt.Errorf("writing %s: %v", fileName, err) - } + commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String()) + if err != nil { + return } - } - - startEnvServer(func(m manager.Manager) { - utilruntime.Must((&controllers.GitRepositoryReconciler{ - Client: m.GetClient(), - Storage: 
storage, - }).SetupWithManager(m)) - }) - - return nil -} - -func startEnvServer(setupReconcilers func(manager.Manager)) *envtest.Environment { - testEnv := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("testdata", "crd")}, - } - fmt.Println("Starting the test environment") - cfg, err := testEnv.Start() - if err != nil { - panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) - } - - utilruntime.Must(loadExampleKeys()) - utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpStoragePath) - storage, err = controllers.NewStorage(tmpStoragePath, "localhost:5050", time.Minute*1, 2) - if err != nil { - panic(err) - } - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - cert, err := tls.X509KeyPair(examplePublicKey, examplePrivateKey) - if err != nil { - panic(err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(exampleCA) - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - } - tlsConfig.BuildNameToCertificate() - - var transport = httptransport.NewClient(&http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, - }) - gitclient.InstallProtocol("https", transport) - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - if err != nil { - panic(err) - } - if k8sClient == nil { - panic("cfg is nil but should not be") - } - - k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - if err != nil { - panic(err) - } - - setupReconcilers(k8sManager) - - time.Sleep(2 * time.Second) - go func() { - fmt.Println("Starting k8sManager...") - utilruntime.Must(k8sManager.Start(context.TODO())) - }() - - return testEnv -} + created, err := 
createGitRepository(f, gitServerURL.String(), commit.String(), namespace.Name) + if err != nil { + return + } + err = k8sClient.Create(context.Background(), created) + if err != nil { + return + } + defer k8sClient.Delete(context.Background(), created) -// FuzzRandomGitFiles implements a fuzzer that -// targets the GitRepository reconciler. -func FuzzRandomGitFiles(data []byte) int { - initter.Do(func() { - utilruntime.Must(ensureDependencies()) + // Let the reconciler do its thing: + time.Sleep(60 * time.Millisecond) }) - - f := fuzz.NewConsumer(data) - namespace, deleteNamespace, err := createNamespace(f) - if err != nil { - return 0 - } - defer deleteNamespace() - - gitServerURL, stopGitServer := createGitServer(f) - defer stopGitServer() - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - if err != nil { - panic(err) - } - wt, err := gitrepo.Worktree() - if err != nil { - panic(err) - } - - // Create random files for the git source - err = createRandomFiles(f, fs, wt) - if err != nil { - return 0 - } - - commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String()) - if err != nil { - return 0 - } - created, err := createGitRepository(f, gitServerURL.String(), commit.String(), namespace.Name) - if err != nil { - return 0 - } - err = k8sClient.Create(context.Background(), created) - if err != nil { - return 0 - } - defer k8sClient.Delete(context.Background(), created) - - // Let the reconciler do its thing: - time.Sleep(60 * time.Millisecond) - - return 1 } // FuzzGitResourceObject implements a fuzzer that targets // the GitRepository reconciler. -func FuzzGitResourceObject(data []byte) int { - initter.Do(func() { - utilruntime.Must(ensureDependencies()) - }) - - f := fuzz.NewConsumer(data) - - // Create this early because if it fails, then the fuzzer - // does not need to proceed. 
- repository := &sourcev1.GitRepository{} - err := f.GenerateStruct(repository) - if err != nil { - return 0 - } +func FuzzGitResourceObject(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + initter.Do(func() { + utilruntime.Must(ensureDependencies()) + }) + + f := fuzz.NewConsumer(data) + + // Create this early because if it fails, then the fuzzer + // does not need to proceed. + repository := &sourcev1.GitRepository{} + err := f.GenerateStruct(repository) + if err != nil { + return + } - metaName, err := f.GetStringFrom(lettersNumbersAndDash, 59) - if err != nil { - return 0 - } + metaName, err := f.GetStringFrom(lettersNumbersAndDash, 59) + if err != nil { + return + } - gitServerURL, stopGitServer := createGitServer(f) - defer stopGitServer() + gitServerURL, stopGitServer := createGitServer(f) + defer stopGitServer() - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - if err != nil { - return 0 - } - wt, err := gitrepo.Worktree() - if err != nil { - return 0 - } + fs := memfs.New() + gitrepo, err := git.Init(memory.NewStorage(), fs) + if err != nil { + return + } + wt, err := gitrepo.Worktree() + if err != nil { + return + } - // Add a file - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - if err != nil { - return 0 - } + // Add a file + ff, _ := fs.Create("fixture") + _ = ff.Close() + _, err = wt.Add(fs.Join("fixture")) + if err != nil { + return + } - commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String()) - if err != nil { - return 0 - } + commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String()) + if err != nil { + return + } - namespace, deleteNamespace, err := createNamespace(f) - if err != nil { - return 0 - } - defer deleteNamespace() + namespace, deleteNamespace, err := createNamespace(f) + if err != nil { + return + } + defer deleteNamespace() - repository.Spec.URL = gitServerURL.String() - repository.Spec.Verification.Mode = "head" - repository.Spec.SecretRef = 
nil + repository.Spec.URL = gitServerURL.String() + repository.Spec.Verification.Mode = "head" + repository.Spec.SecretRef = nil - reference := &sourcev1.GitRepositoryRef{Branch: "some-branch"} - reference.Commit = strings.Replace(reference.Commit, "", commit.String(), 1) - repository.Spec.Reference = reference + reference := &sourcev1.GitRepositoryRef{Branch: "some-branch"} + reference.Commit = strings.Replace(reference.Commit, "", commit.String(), 1) + repository.Spec.Reference = reference - repository.ObjectMeta = metav1.ObjectMeta{ - Name: metaName, - Namespace: namespace.Name, - } - err = k8sClient.Create(context.Background(), repository) - if err != nil { - return 0 - } - defer k8sClient.Delete(context.Background(), repository) + repository.ObjectMeta = metav1.ObjectMeta{ + Name: metaName, + Namespace: namespace.Name, + } + err = k8sClient.Create(context.Background(), repository) + if err != nil { + return + } + defer k8sClient.Delete(context.Background(), repository) - // Let the reconciler do its thing. - time.Sleep(50 * time.Millisecond) - return 1 + // Let the reconciler do its thing. 
+ time.Sleep(50 * time.Millisecond) + }) } func loadExampleKeys() (err error) { @@ -527,3 +393,143 @@ func createRandomFiles(f *fuzz.ConsumeFuzzer, fs billy.Filesystem, wt *git.Workt } return nil } + +func envtestBinVersion() string { + if binVersion := os.Getenv("ENVTEST_BIN_VERSION"); binVersion != "" { + return binVersion + } + return defaultBinVersion +} + +func ensureDependencies() error { + if _, err := os.Stat("/.dockerenv"); os.IsNotExist(err) { + return nil + } + + if os.Getenv("KUBEBUILDER_ASSETS") == "" { + binVersion := envtestBinVersion() + cmd := exec.Command("/usr/bin/bash", "-c", fmt.Sprintf(`go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \ + /root/go/bin/setup-envtest use -p path %s`, binVersion)) + + cmd.Env = append(os.Environ(), "GOPATH=/root/go") + assetsPath, err := cmd.Output() + if err != nil { + return err + } + os.Setenv("KUBEBUILDER_ASSETS", string(assetsPath)) + } + + // Output all embedded testdata files + embedDirs := []string{"testdata/crd", "testdata/certs"} + for _, dir := range embedDirs { + err := os.MkdirAll(dir, 0o700) + if err != nil { + return fmt.Errorf("mkdir %s: %v", dir, err) + } + + templates, err := fs.ReadDir(testFiles, dir) + if err != nil { + return fmt.Errorf("reading embedded dir: %v", err) + } + + for _, template := range templates { + fileName := fmt.Sprintf("%s/%s", dir, template.Name()) + fmt.Println(fileName) + + data, err := testFiles.ReadFile(fileName) + if err != nil { + return fmt.Errorf("reading embedded file %s: %v", fileName, err) + } + + os.WriteFile(fileName, data, 0o600) + if err != nil { + return fmt.Errorf("writing %s: %v", fileName, err) + } + } + } + + startEnvServer(func(m manager.Manager) { + utilruntime.Must((&GitRepositoryReconciler{ + Client: m.GetClient(), + Storage: storage, + }).SetupWithManagerAndOptions(m, GitRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + })) + }) + + return nil +} + +func startEnvServer(setupReconcilers 
func(manager.Manager)) *envtest.Environment { + testEnv := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("testdata", "crd")}, + } + fmt.Println("Starting the test environment") + cfg, err := testEnv.Start() + if err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } + + utilruntime.Must(loadExampleKeys()) + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + if err != nil { + panic(err) + } + defer os.RemoveAll(tmpStoragePath) + storage, err = intstorage.New(tmpStoragePath, "localhost:5050", time.Minute*1, 2) + if err != nil { + panic(err) + } + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + cert, err := tls.X509KeyPair(examplePublicKey, examplePrivateKey) + if err != nil { + panic(err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(exampleCA) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + } + tlsConfig.BuildNameToCertificate() + + var transport = httptransport.NewClient(&http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + }) + gitclient.InstallProtocol("https", transport) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + if err != nil { + panic(err) + } + if k8sClient == nil { + panic("cfg is nil but should not be") + } + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + if err != nil { + panic(err) + } + + setupReconcilers(k8sManager) + + time.Sleep(2 * time.Second) + go func() { + fmt.Println("Starting k8sManager...") + utilruntime.Must(k8sManager.Start(context.TODO())) + }() + + return testEnv +} diff --git a/internal/controller/gitrepository_controller_test.go b/internal/controller/gitrepository_controller_test.go new file mode 
100644 index 000000000..f9f7a591d --- /dev/null +++ b/internal/controller/gitrepository_controller_test.go @@ -0,0 +1,3606 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/go-git/go-billy/v5/memfs" + gogit "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage/memory" + . 
"github.com/onsi/gomega" + sshtestdata "golang.org/x/crypto/ssh/testdata" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/git/github" + "github.com/fluxcd/pkg/gittestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/ssh" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/features" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +const ( + encodedCommitFixture = `tree 35f0b28987e60d4b8dec1f707fd07fef5ad84abc +parent 8b52742dbc848eb0975e62ae00fbfa4f8108e835 +author Sanskar Jaiswal 1691045123 +0530 +committer Sanskar Jaiswal 1691068951 +0530 + +git/e2e: disable CGO while running e2e tests + +Disable CGO for Git e2e tests as it was originially required because of +our libgit2 client. 
Since we no longer maintain a libgit2 client, there +is no need to run the tests with CGO enabled. + +Signed-off-by: Sanskar Jaiswal +` + + malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 + +Update containerd and runc to fix CVEs + +Signed-off-by: Stefan Prodan +` + signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- + +iQIzBAABCAAdFiEEOxEY0f3iSZ5rKQ+vWYLQJ5wif/0FAmTLqnEACgkQWYLQJ5wi +f/1mYw/+LRttvfPrfYl7ASUBGYSQuDzjeold8OO1LpmwjrKPpX4ivZbXHh+lJF0F +fqudKuJfJzeQCHsMZjnfgvXHd2VvxPh1jX6h3JLuNu7d4g1DtNQsKJtsLx7JW99X +J9Bb1xj0Ghh2PkrWEB9vpw+uZz4IhFrB+DNNLRNBkon3etrS1q57q8dhQFIhLI1y +ij3rq3kFHjrNNdokIv2ujyVJtWgy2fK2ELW5v2dznpykOo7hQEKgtOIHPBzGBFT0 +dUFjB99Qy4Qgjh3vWaY4fZ3u/vhp3swmw91OlDkFeyndWjDSZhzYnb7wY+U6z35C +aU4Gzc71CquSd/nTdOEkpuolBVWV5cBkM+Nxi8jtVGBeDDFE49j27a3lQ3+qtT7/ +q4FCe5Jw3GSOJvaLBLGmYVn9fc49t/28b5tkGtCHs3ATpsJohzELEIiDP90Me7hQ +Joks3ML38T4J/zZ4/ObbVMkrCEATYe3r1Ep7+e6VmOG9iTg0JIexexddjHX26Tgu +iuVP2GD/8PceqgNW/LPX84Ub32WTKPZJg+NyliDjH5QOvmguK1dRtSb/9eyYcoSF +Fkf0HcgG5jOk0OZJv0QcqXd9PhB4oXeuXgGszo9M+fhr3nWvEooAJtIyLtVtt/u2 +rNNB7xkZ1uWx+52w9RG2gmZh+LaESwd1rNXgUFLNBebNN3jNzsA= +=73xf +-----END PGP SIGNATURE-----` + + encodedTagFixture = `object 11525516bd55152ce68848bb14680aad43f18479 +type commit +tag v0.1.0 +tagger Sanskar Jaiswal 1691132850 +0530 + +v0.1.0 +` + + malformedEncodedTagFixture = `object 11525516bd55152ce68848bb14680aad43f18479 +tagger Sanskar Jaiswal 1691132850 +0530 + +v0.1.0 +` + + signatureTagFixture = `-----BEGIN PGP SIGNATURE----- + +iQIzBAABCAAdFiEEOxEY0f3iSZ5rKQ+vWYLQJ5wif/0FAmTMo7IACgkQWYLQJ5wi +f/1uUQ/9F70u8LZZQ3+U2vuYQ8fyVp/AV5h5zwxK5UlkR1crB0gSpdaiIxMMQRc8 +4QQIqCXloSHherUu9SPbDe9Qmr0JL8a57XqThjUSa52IYMDVos9sYwViJit+xGyz +HDot2nQ8MAqkDaiuwAnTqOyTPA89U36lGV/X/25mYxAuED+8xFx1OfvjGkX2eMEr +peWJ8VEfdFr2OmWwFceh6iF/izIaZGttwCyNy4BIh2W0GvUtQAxzqF4IzUvwfJU/ +bgARaHKQhWqFhDNImttsqJBweWavEDDmUgNg80c3cUZKqBtAjElToP9gis/SnPH5 
+zaCAH66OzyKIhn6lde7KpOzyqbOyzddTa8SKkAAHyO7onukOktV8W9toeAxlF20q +Bw0MZGzAGisF8EK1HVv8UzrW9vAwdJN/yDIHWkjaeHr2FHmeV3a2QxH9PdwbE3tI +B21TCVULJuM8oR0ZG62xzg5ba5HiZMiilNMJdrBfjk5xYGk3LQU1gB4FVYa7yTsN +YfAokYtUIG187Qb8vPr1P95TzZxKdb7r/PAKEbGPro5D2Rri8OnxO/OaXG/giWS5 +5gRGmsQjvMsbzE/2PVc9+jshtZM49xL9H3DMjAWtO6MFbOqGqdi4MBa0T4qj6sZz +AbSLuRIBpXDES86faDXLRmufc95+iA/fh7W23G6vmd+SjXnCcHc= +=o4nf +-----END PGP SIGNATURE----- +` + + armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGQmiZ0BEACwsubUFoWtp6iJDK9oUN4RhPS0bAKpcRTa7P/rTCD/MbTMYdWC +4vod3FMm4+rNF0SESxY67MGmR4M3dSyOZkCijqHm9jDVOvN847LOl5bntkm8Euxm +LkpfsBWng09+gtfwuKxOxPMY017D1jM23OGbrqznHaokerFeDp9sJf1C7Z9jVf39 +oB/MF0bMdUJuxFFBdpoI73DORlAVUI14mfDbFj7v02Spkv1hqS2LtJ/Jl4QR/Vw4 +mR71aFmGFWqLBlkUOjJ2SZGkCmF/qbUdLmVb7yZUtqtua4DVkBPTORfOMhGDbrME +Nmb6Ft5neZwU0ETsT/oc6Np+PDFSUDBxu0CbKG6bw7N2y8RfiVJTaoNLFoFGV5dA +K8OpyTxU4IEPDMpkWs7tpRxPCC02uCfyqlvdF4EURXYXTj54DDLOGQjoqB+iGtVi +y2dQ4cuNhfuIFCFTA16s41DwmB0fQuOg3yfPPo7+jUefD+iAt3CZ9Guvu5+/mGyq +KxSBBRFHc8ED/L7JLPMU6tZglaPch9P4H6Fi2swDryyZQn/a2kYanEh9v1wL94L4 +3gUdjIYP8kjfg7nnS2FX9hl5FtPeM3jvnWjfv9jR+c8HWQZY2wM3Rj5iulu70K2U +pkdRUN0p2D5+Kq6idNreNoPlpQGoUOYrtAfOwtDFgMwuOZ78XkSIbFhtgwARAQAB +tEVTYW5za2FyIEphaXN3YWwgKEdpdEh1YiBHUEcgc2lnaW5nIGtleSkgPGphaXN3 +YWxzYW5za2FyMDc4QGdtYWlsLmNvbT6JAk4EEwEIADgWIQQ7ERjR/eJJnmspD69Z +gtAnnCJ//QUCZCaJnQIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRBZgtAn +nCJ//dF4D/0Tl5Wre6KrZvjDs5loulhN8YMYb63jr+x1eVkpMpta51XvZvkZFoiY +9T4MQX+qgAkTrUJsxgWUwtVtDfmbyLXodDRS6JUbCRiMu12VD7mNT+lUfuhR2sJv +rHZoolQp7X4DTea1R64PcttfmlGO2pUNpGNmhojO0PahXqOCHmEUWBJQhI8RvOcs +zRjEzDcAcEgtMGzamq6DR54YxyzGE8V9b5WD/elmEXM6uWW+CkfX8WskKbLdRY0t ++GQ1pOtf3tKxD46I3LIsUEwbyh4Dv4vJbZmyxjI+FKbSCW5tMrz/ZWrPNl0m+pDI +Yn0+GWed2pgTMFh3VAhYCyIVugKynlaToH+D2z3DnuEp3Jfs+b1BdirS/PW79tW7 +rjCJzqofF2UPyK0mzdYL+P3k9Hip5J0bCGoeMdCLsP5fYq3Y1YS4bH4JkDm52y+r +y89AH4LHHQt+A7w19I+6M2jmcNnDUMrpuSo84GeoM59O3fU7hLCC1Jx4hj7EBRrb 
+QzY5FInrE/WTcgFRljK46zhW4ybmfak/xJV654UqJCDWlVbc68D8JrKNQOj7gdPs +zh1+m2pFDEhWZkaFtQbSEpXMIJ9DsCoyQL4Knl+89VxHsrIyAJsmGb3V8xvtv5w9 +QuWtsDnYbvDHtTpu1NZChVrnr/l1k3C2fcLhV1s583AvhGMkbgSXkQ== +=Tdjz +-----END PGP PUBLIC KEY BLOCK----- +` +) + +func TestGitRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "gitrepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + gitRepo := &sourcev1.GitRepository{} + gitRepo.Name = "test-gitrepo" + gitRepo.Namespace = namespaceName + gitRepo.Spec = sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "https://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + gitRepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, gitRepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, gitRepo)).NotTo(HaveOccurred()) + + r := &GitRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(gitRepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestGitRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + origObj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "gitrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for GitRepository to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: gitRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for GitRepository to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. + obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestGitRepositoryReconciler_reconcileSource_emptyRepository(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "empty-", + Generation: 1, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + URL: server.HTTPAddress() + "/test.git", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var commit git.Commit + var includes artifactSet + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, &commit, &includes, t.TempDir()) + assertConditions := []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "EmptyGitRepository", "git repository is empty"), + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(assertConditions)) + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(commit).ToNot(BeNil()) +} + +func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := []struct { + name string + protocol string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + secretFunc func(secret *corev1.Secret, baseURL string) + middlewareFunc gittestserver.HTTPMiddleware + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes Reconciling=True", + protocol: "http", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTP with Basic Auth secret makes 
Reconciling=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "mtls-certs"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS and invalid private key makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + 
"tls.crt": clientPublicKey, + "tls.key": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-mtls-certs"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "tls: failed to find any PEM data in key input"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "HTTPS with CAFile secret makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with CAFile secret with both ca.crt and caFile keys makes Reconciling=True and ignores caFile", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef 
= &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // The expected error messages may differ when in darwin. In some cases it will match the + // error message expected in linux: "x509: certificate signed by unknown authority". In + // other cases it may get "x509: “example.com” certificate is not standards compliant" instead. + // + // Trimming the expected error message for consistent results. 
+ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "mTLS GitHub App without ca.crt makes FetchFailed=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-no-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-no-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo") + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // should record a FetchFailedCondition due to TLS handshake + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo"), + }, + }, + { + name: "mTLS GitHub App with ca.crt makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + username: github.AccessTokenUsername, + password: "some-enterprise-token", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), 
+ github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-ca"} + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + secret.Data["ca.crt"] = tlsCA + }, + middlewareFunc: func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/api/v3/app/installations/") { + w.WriteHeader(http.StatusOK) + tok := &github.AppToken{ + Token: "some-enterprise-token", + ExpiresAt: time.Now().Add(time.Hour), + } + _ = json.NewEncoder(w).Encode(tok) + } + handler.ServeHTTP(w, r) + }) + }, + wantErr: false, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + // TODO: Add test case for HTTPS with bearer token auth secret. It + // depends on gitkit to have support for bearer token based + // authentication. 
+ { + name: "SSH with private key secret makes Reconciling=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:"), + }, + }, + { + name: "SSH with password protected private key secret makes Reconciling=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes, + "password": []byte("password"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "Include get failure makes CheckoutFailed=True and returns error", + protocol: "http", + server: options{ + username: "git", + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + conditions.MarkReconciling(obj, 
meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + obj.Status = sourcev1.GitRepositoryStatus{ + Artifact: &meta.Artifact{ + Revision: "staging/some-revision", + Path: randStringRunes(10), + }, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master@sha1:'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. 
+ name: "github provider without secret ref makes FetchFailed=True", + protocol: "http", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. + name: "empty provider with github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-app-secret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("1111"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-app-secret"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef '/github-app-secret' has github app data but provider is not set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when 
using + // provider auth. Protocol http is used for simplicity. + name: "github provider without github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-basic-auth"} + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + + if tt.middlewareFunc != nil { + server.AddHTTPMiddlewares(tt.middlewareFunc) + } + + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + if len(tt.server.username+tt.server.password) > 0 { + server.Auth(tt.server.username, 
tt.server.password) + } + + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + obj.Spec.URL = server.HTTPAddress() + repoPath + case "https": + g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + obj.Spec.URL = server.HTTPAddress() + repoPath + case "ssh": + server.KeyDir(filepath.Join(server.Root(), "keys")) + + g.Expect(server.ListenSSH()).To(Succeed()) + obj.Spec.URL = server.SSHAddress() + repoPath + + go func() { + server.StartSSH() + }() + defer server.StopSSH() + + if secret != nil && len(secret.Data["known_hosts"]) == 0 { + u, err := url.Parse(obj.Spec.URL) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(u.Host).ToNot(BeEmpty()) + knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) + g.Expect(err).NotTo(HaveOccurred()) + secret.Data["known_hosts"] = knownHosts + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.secretFunc != nil { + tt.secretFunc(secret, server.HTTPAddress()) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{})
+
+ if secret != nil {
+ clientBuilder.WithObjects(secret.DeepCopy())
+ }
+
+ r := &GitRepositoryReconciler{
+ Client: clientBuilder.Build(),
+ EventRecorder: record.NewFakeRecorder(32),
+ Storage: testStorage,
+ patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
+ }
+
+ tmpDir := t.TempDir()
+
+ head, _ := localRepo.Head()
+ assertConditions := tt.assertConditions
+ for k := range assertConditions {
+ assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "<commit>", head.Hash().String())
+ assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "<url>", obj.Spec.URL)
+ }
+
+ g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred())
+ defer func() {
+ g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred())
+ }()
+
+ var commit git.Commit
+ var includes artifactSet
+ sp := patch.NewSerialPatcher(obj, r.Client)
+
+ got, err := r.reconcileSource(context.TODO(), sp, obj, &commit, &includes, tmpDir)
+ g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
+ g.Expect(err != nil).To(Equal(tt.wantErr))
+ g.Expect(got).To(Equal(tt.want))
+ g.Expect(commit).ToNot(BeNil())
+
+ // In-progress status condition validity.
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { + tests := []struct { + name string + url string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + wantErr string + }{ + { + name: "azure provider", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + }, + wantErr: "ManagedIdentityCredential", + }, + { + name: "azure provider with service account and feature gate for object-level identity disabled", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + obj.Spec.ServiceAccountName = "azure-sa" + }, + wantErr: auth.FeatureGateObjectLevelWorkloadIdentity, + }, + { + name: "github provider with no secret ref", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "github provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: []byte("abc"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "Key must be a PEM encoded PKCS1 or PKCS8 key", + }, + { + name: "generic provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "secretRef '/githubAppSecret' has github app data but provider is not set to github", + }, + { + name: "github provider with basic auth secret", + url: "https://github.com/org/repo.git", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth-secret", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "basic-auth-secret", + } + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "generic provider", + url: "https://example.com/org/repo", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + }, + }, + { + name: "secret ref defined for non existing secret", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "authSecret", + } + }, + wantErr: "failed to get secret '/authSecret': secrets \"authSecret\" not found", + }, + { + url: "https://example.com/org/repo", + name: "no provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{})
+
+ if tt.secret != nil {
+ clientBuilder.WithObjects(tt.secret)
+ }
+
+ obj := &sourcev1.GitRepository{}
+ r := &GitRepositoryReconciler{
+ EventRecorder: record.NewFakeRecorder(32),
+ Client: clientBuilder.Build(),
+ features: features.FeatureGates(),
+ patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
+ }
+
+ url, err := url.Parse(tt.url)
+ g.Expect(err).ToNot(HaveOccurred())
+
+ if tt.beforeFunc != nil {
+ tt.beforeFunc(obj)
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ opts, err := r.getAuthOpts(ctx, obj, *url, nil)
+
+ if tt.wantErr != "" {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).To(ContainSubstring(tt.wantErr))
+ } else {
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(opts).ToNot(BeNil())
+ g.Expect(opts.BearerToken).To(BeEmpty())
+ g.Expect(opts.Username).To(BeEmpty())
+ g.Expect(opts.Password).To(BeEmpty())
+ }
+ })
+ }
+}
+
+func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) {
+ g := NewWithT(t)
+
+ branches := []string{"staging"}
+ tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"}
+ refs := []string{"refs/pull/420/head"}
+
+ tests := []struct {
+ name string
+ reference *sourcev1.GitRepositoryRef
+ beforeFunc func(obj *sourcev1.GitRepository, latestRev string)
+ want sreconcile.Result
+ wantErr bool
+ wantRevision string
+ wantArtifactOutdated bool
+ wantReconciling bool
+ }{
+ {
+ name: "Nil reference (default branch)",
+ want: sreconcile.ResultSuccess,
+ wantRevision: "master@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "Branch",
+ reference: &sourcev1.GitRepositoryRef{
+ Branch: "staging",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "staging@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "Tag",
+ reference: &sourcev1.GitRepositoryRef{
+ Tag: "v0.1.0",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision:
"v0.1.0@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "Branch commit",
+ reference: &sourcev1.GitRepositoryRef{
+ Branch: "staging",
+ Commit: "<commit>",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "staging@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "Ref Name pointing to a branch",
+ reference: &sourcev1.GitRepositoryRef{
+ Name: "refs/heads/staging",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "refs/heads/staging@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "Ref Name pointing to a PR",
+ reference: &sourcev1.GitRepositoryRef{
+ Name: "refs/pull/420/head",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "refs/pull/420/head@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "SemVer",
+ reference: &sourcev1.GitRepositoryRef{
+ SemVer: "*",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "v2.0.0@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "SemVer range",
+ reference: &sourcev1.GitRepositoryRef{
+ SemVer: "<v0.2.1",
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "0.2.0@sha1:<commit>",
+ wantReconciling: true,
+ },
+ {
+ name: "SemVer prerelease",
+ reference: &sourcev1.GitRepositoryRef{
+ SemVer: ">=1.0.0-0 <1.1.0-0",
+ },
+ wantRevision: "v1.0.0-alpha@sha1:<commit>",
+ want: sreconcile.ResultSuccess,
+ wantReconciling: true,
+ },
+ {
+ name: "Existing artifact makes ArtifactOutdated=True",
+ reference: &sourcev1.GitRepositoryRef{
+ Branch: "staging",
+ },
+ beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
+ obj.Status = sourcev1.GitRepositoryStatus{
+ Artifact: &meta.Artifact{
+ Revision: "staging/some-revision",
+ Path: randStringRunes(10),
+ },
+ }
+ conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
+ conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "staging@sha1:<commit>",
+ wantArtifactOutdated: true,
+ wantReconciling: true,
+ },
+ {
+ name: "Optimized clone",
+ reference: &sourcev1.GitRepositoryRef{
+ Branch: "staging",
+ },
+ beforeFunc: func(obj
*sourcev1.GitRepository, latestRev string) {
+ // Add existing artifact on the object and storage.
+ obj.Status = sourcev1.GitRepositoryStatus{
+ Artifact: &meta.Artifact{
+ Revision: "staging@sha1:" + latestRev,
+ Path: randStringRunes(10),
+ },
+ }
+ conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
+ conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
+ },
+ want: sreconcile.ResultEmpty,
+ wantErr: true,
+ wantRevision: "staging@sha1:<commit>",
+ wantReconciling: false,
+ },
+ {
+ name: "Optimized clone different ignore",
+ reference: &sourcev1.GitRepositoryRef{
+ Branch: "staging",
+ },
+ beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
+ // Set new ignore value.
+ obj.Spec.Ignore = ptr.To("foo")
+ // Add existing artifact on the object and storage.
+ obj.Status = sourcev1.GitRepositoryStatus{
+ Artifact: &meta.Artifact{
+ Revision: "staging@sha1:" + latestRev,
+ Path: randStringRunes(10),
+ },
+ }
+ conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
+ conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
+ },
+ want: sreconcile.ResultSuccess,
+ wantRevision: "staging@sha1:<commit>",
+ wantReconciling: false,
+ },
+ }
+
+ server, err := gittestserver.NewTempGitServer()
+ g.Expect(err).To(BeNil())
+ defer os.RemoveAll(server.Root())
+ server.AutoCreate()
+ g.Expect(server.StartHTTP()).To(Succeed())
+ defer server.StopHTTP()
+
+ repoPath := "/test.git"
+ localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ headRef, err := localRepo.Head()
+ g.Expect(err).NotTo(HaveOccurred())
+
+ for _, branch := range branches {
+ g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed())
+ }
+ for _, tag := range tags {
+ g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed())
+ }
+
+ for _, ref := range refs {
+ g.Expect(remoteRefForHead(localRepo,
headRef, ref)).To(Succeed())
+ }
+
+ r := &GitRepositoryReconciler{
+ Client: fakeclient.NewClientBuilder().
+ WithScheme(testEnv.GetScheme()).
+ WithStatusSubresource(&sourcev1.GitRepository{}).
+ Build(),
+ EventRecorder: record.NewFakeRecorder(32),
+ Storage: testStorage,
+ patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ obj := &sourcev1.GitRepository{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "checkout-strategy-",
+ Generation: 1,
+ },
+ Spec: sourcev1.GitRepositorySpec{
+ Interval: metav1.Duration{Duration: interval},
+ Timeout: &metav1.Duration{Duration: timeout},
+ URL: server.HTTPAddress() + repoPath,
+ Reference: tt.reference,
+ },
+ }
+
+ if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "<commit>" {
+ obj.Spec.Reference.Commit = headRef.Hash().String()
+ }
+
+ tmpDir := t.TempDir()
+
+ if tt.beforeFunc != nil {
+ tt.beforeFunc(obj, headRef.Hash().String())
+ }
+
+ g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred())
+ defer func() {
+ g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred())
+ }()
+
+ var commit git.Commit
+ var includes artifactSet
+ sp := patch.NewSerialPatcher(obj, r.Client)
+ got, err := r.reconcileSource(ctx, sp, obj, &commit, &includes, tmpDir)
+ if err != nil && !tt.wantErr {
+ t.Log(err)
+ }
+ g.Expect(err != nil).To(Equal(tt.wantErr))
+ g.Expect(got).To(Equal(tt.want))
+ if tt.wantRevision != "" && !tt.wantErr {
+ revision := strings.ReplaceAll(tt.wantRevision, "<commit>", headRef.Hash().String())
+ g.Expect(commitReference(obj, &commit)).To(Equal(revision))
+ g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(Equal(tt.wantArtifactOutdated))
+ g.Expect(conditions.IsTrue(obj, meta.ReconcilingCondition)).To(Equal(tt.wantReconciling))
+ }
+ // In-progress status condition validity.
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + dir string + includes artifactSet + beforeFunc func(obj *sourcev1.GitRepository) + afterFunc func(t *WithT, obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes ArtifactInStorage=True", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Archiving artifact to storage with includes makes ArtifactInStorage=True", + dir: "testdata/git/repository", + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Include = []sourcev1.GitRepositoryInclude{ + {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}}, + } + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:34d9af1a2fcfaef3ee9487d67dc2d642bc7babdb9444a5f60d1f32df32e4de7d")) + t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 
'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + dir: "testdata/git/repository", + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Include = []sourcev1.GitRepositoryInclude{ + {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}}, + } + obj.Status.Artifact = &meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"} + obj.Status.IncludedArtifacts = []*meta.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}} + obj.Status.ObservedInclude = obj.Spec.Include + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Spec ignore overwrite is taken into account", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Ignore = ptr.To("!**.txt\n") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:a17037f96f541a47bdadcd12ab40b943c50a9ffd25dc8a30a5e9af52971fd94f")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Source ignore for subdir ignore patterns", + dir: "testdata/git/repowithsubdirs", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: 
interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:ad9943d761b30e943e2a770ea9083a40fc03f09846efd61f6c442cc48fefad11")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:34d9af1a2fcfaef3ee9487d67dc2d642bc7babdb9444a5f60d1f32df32e4de7d")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"), + }, + }, + { + name: "Target path does not exists", + dir: "testdata/git/foo", + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat target artifact path"), + }, + }, + { + name: "Target path is not a directory", + dir: "testdata/git/repository/foo.txt", + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "invalid target path"), + }, + }, + } + artifactSize := func(g *WithT, artifactURL string) *int64 { + if artifactURL == "" { + return nil + } + res, err := http.Get(artifactURL) + 
g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.StatusCode).To(Equal(http.StatusOK)) + defer res.Body.Close() + return &res.ContentLength + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + resetChmod(tt.dir, 0o750, 0o600) + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "reconcile-artifact-", + Generation: 1, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + commit := git.Commit{ + Hash: []byte("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"), + Reference: "refs/heads/main", + } + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(ctx, sp, obj, &commit, &tt.includes, tt.dir) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if obj.Status.Artifact != nil { + g.Expect(obj.Status.Artifact.Size).To(Equal(artifactSize(g, obj.Status.Artifact.URL))) + } + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { + g := NewWithT(t) + + server, err := testserver.NewTempArtifactServer() + g.Expect(err).NotTo(HaveOccurred()) + server.Start() + defer server.Stop() + storage, err := newTestStorage(server.HTTPServer) + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(storage.BasePath) + + dependencyInterval := 5 * time.Second + + type dependency struct { + name string + withArtifact bool + conditions []metav1.Condition + } + + type include struct { + name string + fromPath string + toPath string + shouldExist bool + } + + tests := []struct { + name string + dependencies []dependency + includes 
[]include + beforeFunc func(obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "New includes make ArtifactOutdated=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), + }, + }, + { + name: "b", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/", shouldExist: true}, + {name: "b", toPath: "b/", shouldExist: true}, + }, + want: sreconcile.ResultSuccess, + }, + { + name: "Invalid FromPath makes IncludeUnavailable=True and returns error", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + }, + }, + includes: []include{ + {name: "a", fromPath: "../../../path", shouldExist: false}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var depObjs []client.Object + for _, d := range tt.dependencies { + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.name, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: d.conditions, + }, + } + if d.withArtifact { + obj.Status.Artifact = &meta.Artifact{ + Path: d.name + ".tar.gz", + Revision: d.name, + LastUpdateTime: metav1.Now(), + } + g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed()) + } + depObjs = append(depObjs, obj) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.GitRepository{}) + + if len(tt.dependencies) > 0 { + clientBuilder.WithObjects(depObjs...) 
+ } + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, + requeueDependency: dependencyInterval, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-include", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + }, + } + + for i, incl := range tt.includes { + incl := sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, + FromPath: incl.fromPath, + ToPath: incl.toPath, + } + tt.includes[i].fromPath = incl.GetFromPath() + tt.includes[i].toPath = incl.GetToPath() + obj.Spec.Include = append(obj.Spec.Include, incl) + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + tmpDir := t.TempDir() + + var commit git.Commit + var includes artifactSet + + // Build includes artifactSet. 
+ artifactSet, err := r.fetchIncludes(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + includes = *artifactSet + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileInclude(ctx, sp, obj, &commit, &includes, tmpDir) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + if err == nil { + g.Expect(len(includes)).To(Equal(len(tt.includes))) + } + g.Expect(got).To(Equal(tt.want)) + for _, i := range tt.includes { + if i.toPath != "" { + expect := g.Expect(filepath.Join(tmpDir, i.toPath)) + if i.shouldExist { + expect.To(BeADirectory()) + } else { + expect.NotTo(BeADirectory()) + } + } + if i.shouldExist { + g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory()) + } else { + g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory()) + } + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.GitRepository, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + 
Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "e", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with 
a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + 
} + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + defer func() { + g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) + }() + + r := &GitRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.GitRepository{}). 
+ Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var c *git.Commit + var as artifactSet + sp := patch.NewSerialPatcher(obj, r.Client) + got, err := r.reconcileStorage(context.TODO(), sp, obj, c, &as, "") + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestGitRepositoryReconciler_verifySignature(t *testing.T) { + tests := []struct { + name string + secret *corev1.Secret + commit git.Commit + beforeFunc func(obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + err error + wantSourceVerificationMode *sourcev1.GitVerificationMode + assertConditions []metav1.Condition + }{ + { + name: "Valid commit with mode=HEAD makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(encodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: 
sourcev1.ModeGitHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: sreconcile.ResultSuccess, + wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- commit 'shasum' with key '5982D0279C227FFD'"), + }, + }, + { + name: "Valid commit with mode=head makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(encodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: sreconcile.ResultSuccess, + wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- commit 'shasum' with key '5982D0279C227FFD'"), + }, + }, + { + name: "Valid tag with mode=tag makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + ReferencingTag: &git.Tag{ + Name: "v0.1.0", + Hash: []byte("shasum"), + Encoded: []byte(encodedTagFixture), + Signature: signatureTagFixture, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = 
&sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: sreconcile.ResultSuccess, + wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- tag 'v0.1.0@shasum' with key '5982D0279C227FFD'"), + }, + }, + { + name: "Valid tag and commit with mode=TagAndHEAD makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(encodedCommitFixture), + Signature: signatureCommitFixture, + ReferencingTag: &git.Tag{ + Name: "v0.1.0", + Hash: []byte("shasum"), + Encoded: []byte(encodedTagFixture), + Signature: signatureTagFixture, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: sreconcile.ResultSuccess, + wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- tag 'v0.1.0@shasum' with key '5982D0279C227FFD'\n\t- commit 'shasum' with key '5982D0279C227FFD'"), + }, + }, + { + name: "Source verification mode in status is unset if there's no verification in spec", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.SourceVerificationMode = ptrToVerificationMode(sourcev1.ModeGitHEAD) + obj.Spec.Verification = nil + }, + want: 
sreconcile.ResultSuccess, + }, + { + name: "Verification of tag with no tag ref SourceVerifiedCondition=False and returns a stalling error", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Branch: "main", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + err: serror.NewStalling( + errors.New("cannot verify tag object's signature if a tag reference is not specified"), + "InvalidVerificationMode", + ), + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidVerificationMode", "cannot verify tag object's signature if a tag reference is not specified"), + }, + }, + { + name: "Unsigned tag with mode=tag makes SourceVerifiedCondition=False", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + ReferencingTag: &git.Tag{ + Name: "v0.1.0", + Hash: []byte("shasum"), + Encoded: []byte(encodedTagFixture), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidGitObject", "cannot verify signature of tag 'v0.1.0@shasum' since it is not signed"), + }, + }, + { + name: 
"Partially successful verification makes SourceVerifiedCondition=False", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedCommitFixture), + Signature: signatureCommitFixture, + ReferencingTag: &git.Tag{ + Name: "v0.1.0", + Hash: []byte("shasum"), + Encoded: []byte(encodedTagFixture), + Signature: signatureTagFixture, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable to verify Git commit: unable to verify payload with any of the given key rings"), + }, + }, + { + name: "Invalid commit makes SourceVerifiedCondition=False and returns error", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable to verify Git commit: unable to 
verify payload with any of the given key rings"), + }, + }, + { + name: "Invalid tag signature with mode=tag makes SourceVerifiedCondition=False", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + ReferencingTag: &git.Tag{ + Name: "v0.1.0", + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedTagFixture), + Signature: signatureTagFixture, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Reference = &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidTagSignature", "signature verification of tag 'v0.1.0@shasum' failed: unable to verify Git tag: unable to verify payload with any of the given key rings"), + }, + }, + { + name: "Invalid PGP key makes SourceVerifiedCondition=False and returns error", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid", + }, + Data: map[string][]byte{ + "foo": []byte("invalid PGP public key"), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "invalid", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable 
to verify Git commit: unable to read armored key ring: openpgp: invalid argument: no armored data found"), + }, + }, + { + name: "Secret get failure makes SourceVerifiedCondition=False and returns error", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + SecretRef: meta.LocalObjectReference{ + Name: "none-existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerificationError", "PGP public keys secret error: secrets \"none-existing\" not found"), + }, + }, + { + name: "Nil verification in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{}, + }, + { + name: "Empty verification mode in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-commit-", + Generation: 1, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + got, err := r.verifySignature(context.TODO(), obj, tt.commit) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + if tt.err != nil { + g.Expect(err).To(Equal(tt.err)) + } + g.Expect(got).To(Equal(tt.want)) + if tt.wantSourceVerificationMode != nil { + g.Expect(*obj.Status.SourceVerificationMode).To(Equal(*tt.wantSourceVerificationMode)) + } else { + g.Expect(obj.Status.SourceVerificationMode).To(BeNil()) + } + }) + } +} + +func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "no failure condition", + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", 
"stored artifact for revision"), + }, + }, + { + name: "reconciling condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "stalled condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "mixed failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "reconciling and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "stalled and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "condition-update", + Namespace: "default", + Finalizers: []string{sourcev1.SourceFinalizer}, + }, + Spec: sourcev1.GitRepositorySpec{ + URL: server.HTTPAddress() + repoPath, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithObjects(obj). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + key := client.ObjectKeyFromObject(obj) + res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key}) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(res).To(Equal(tt.want)) + + updatedObj := &sourcev1.GitRepository{} + err = r.Get(ctx, key, updatedObj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +// helpers + +func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) { + fs := memfs.New() + repo, err := gogit.Init(memory.NewStorage(), fs) + if err != nil { + return nil, err + } + + branchRef := plumbing.NewBranchReferenceName(branch) + if err = repo.CreateBranch(&config.Branch{ + Name: branch, + Remote: gogit.DefaultRemoteName, + Merge: branchRef, + }); err != nil { + return nil, err + } + + err = commitFromFixture(repo, fixture) + if err != nil { + return nil, err + } + + if server.HTTPAddress() == "" { + if err = server.StartHTTP(); err != nil { + return nil, err + } + defer server.StopHTTP() + } + if _, err = repo.CreateRemote(&config.RemoteConfig{ + Name: gogit.DefaultRemoteName, + URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath}, + }); err != nil { + return nil, err + } + + if err = repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"}, + }); err != nil { + return nil, err + } + + return repo, nil +} + +func Test_commitFromFixture(t *testing.T) { + g := NewWithT(t) + + repo, err := gogit.Init(memory.NewStorage(), memfs.New()) + g.Expect(err).ToNot(HaveOccurred()) + + err = commitFromFixture(repo, "testdata/git/repository") + 
g.Expect(err).ToNot(HaveOccurred()) +} + +func commitFromFixture(repo *gogit.Repository, fixture string) error { + working, err := repo.Worktree() + if err != nil { + return err + } + fs := working.Filesystem + + if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode()) + } + + fileBytes, err := os.ReadFile(path) + if err != nil { + return err + } + + ff, err := fs.Create(path[len(fixture):]) + if err != nil { + return err + } + defer ff.Close() + + _, err = ff.Write(fileBytes) + return err + }); err != nil { + return err + } + + _, err = working.Add(".") + if err != nil { + return err + } + + if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{ + Author: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), + }, + }); err != nil { + return err + } + + return nil +} + +func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error { + refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch) + return repo.Push(&gogit.PushOptions{ + RemoteName: "origin", + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, + Force: true, + }) +} + +func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error { + if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{ + // Not setting this seems to make things flaky + // Expected success, but got an error: + // <*errors.errorString | 0xc0000f6350>: { + // s: "tagger field is required", + // } + // tagger field is required + Tagger: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), + }, + Message: tag, + }); err != nil { + return err + } + refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag) + return repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, + }) +} + +func 
remoteRefForHead(repo *gogit.Repository, head *plumbing.Reference, reference string) error { + if err := repo.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName(reference), head.Hash())); err != nil { + return err + } + if err := repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{ + config.RefSpec("+" + reference + ":" + reference), + }, + }); err != nil { + return err + } + return nil +} + +func TestGitRepositoryReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.GitRepository) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: "multiple positive conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit"), + }, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, 
sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + TypeMeta: metav1.TypeMeta{ + APIVersion: sourcev1.GroupVersion.String(), + Kind: sourcev1.GitRepositoryKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepo", + Namespace: "foo", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithObjects(obj). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + c := clientBuilder.Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(gitRepositoryReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestGitRepositoryReconciler_notify(t *testing.T) { + concreteCommit := git.Commit{ + Hash: git.Hash("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"), + Message: "test commit", + Encoded: []byte("content"), + } + partialCommit := git.Commit{ + Hash: git.Hash("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"), + } + + noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason") + noopErr.Ignore = true + + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.GitRepository) + newObjBeforeFunc func(obj *sourcev1.GitRepository) + commit git.Commit + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = 
&meta.Artifact{Revision: "xxx", Digest: "yyy"} + }, + commit: concreteCommit, + wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + commit: concreteCommit, + wantEvent: "Normal Succeeded stored artifact for commit 'test commit'", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + commit: concreteCommit, + wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + 
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + { + name: "no-op error result", + res: sreconcile.ResultEmpty, + resErr: noopErr, + oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.GitRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + commit: partialCommit, // no-op will always result in partial commit. + wantEvent: "Normal Succeeded stored artifact for commit 'sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.GitRepository{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &GitRepositoryReconciler{ + EventRecorder: recorder, + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + reconciler.notify(ctx, oldObj, newObj, tt.commit, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { + type dependency struct { + name string + withArtifact bool + conditions []metav1.Condition + } + + type include struct { + name string + fromPath string 
+ toPath string + shouldExist bool + } + + tests := []struct { + name string + dependencies []dependency + includes []include + beforeFunc func(obj *sourcev1.GitRepository) + wantErr bool + wantArtifactSet artifactSet + assertConditions []metav1.Condition + }{ + { + name: "Existing includes", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), + }, + }, + { + name: "b", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/", shouldExist: true}, + {name: "b", toPath: "b/", shouldExist: true}, + }, + wantErr: false, + wantArtifactSet: []*meta.Artifact{ + {Revision: "a"}, + {Revision: "b"}, + }, + }, + { + name: "Include get failure", + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + }, + }, + { + name: "Include without an artifact makes IncludeUnavailable=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: false, + conditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"), + }, + }, + { + name: "Outdated IncludeUnavailable is removed", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") + }, + assertConditions: []metav1.Condition{}, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var depObjs []client.Object + for _, d := range tt.dependencies { + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.name, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: d.conditions, + }, + } + if d.withArtifact { + obj.Status.Artifact = &meta.Artifact{ + Path: d.name + ".tar.gz", + Revision: d.name, + LastUpdateTime: metav1.Now(), + } + } + depObjs = append(depObjs, obj) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.GitRepository{}) + + if len(tt.dependencies) > 0 { + clientBuilder.WithObjects(depObjs...) + } + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-include", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + }, + } + + for i, incl := range tt.includes { + incl := sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, + FromPath: incl.fromPath, + ToPath: incl.toPath, + } + tt.includes[i].fromPath = incl.GetFromPath() + tt.includes[i].toPath = incl.GetToPath() + obj.Spec.Include = append(obj.Spec.Include, incl) + } + + gotArtifactSet, err := r.fetchIncludes(ctx, obj) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + if !tt.wantErr && gotArtifactSet != nil { + g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse()) + } + }) + } +} + +func resetChmod(path string, dirMode os.FileMode, fileMode os.FileMode) error { + err := filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() && info.Mode() != dirMode { + 
os.Chmod(path, dirMode) + } else if !info.IsDir() && info.Mode() != fileMode { + os.Chmod(path, fileMode) + } + return nil + }) + if err != nil { + return fmt.Errorf("cannot reset file permissions: %v", err) + } + + return nil +} + +func TestGitRepositoryIncludeEqual(t *testing.T) { + tests := []struct { + name string + a sourcev1.GitRepositoryInclude + b sourcev1.GitRepositoryInclude + want bool + }{ + { + name: "empty", + want: true, + }, + { + name: "different refs", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "bar"}, + }, + want: false, + }, + { + name: "same refs", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + want: true, + }, + { + name: "different from paths", + a: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + b: sourcev1.GitRepositoryInclude{FromPath: "bar"}, + want: false, + }, + { + name: "same from paths", + a: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + b: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + want: true, + }, + { + name: "different to paths", + a: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + b: sourcev1.GitRepositoryInclude{ToPath: "bar"}, + want: false, + }, + { + name: "same to paths", + a: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + b: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + want: true, + }, + { + name: "same all", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo-ref"}, + FromPath: "foo-path", + ToPath: "bar-path", + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo-ref"}, + FromPath: "foo-path", + ToPath: "bar-path", + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := 
NewWithT(t) + + g.Expect(gitRepositoryIncludeEqual(tt.a, tt.b)).To(Equal(tt.want)) + }) + } +} + +func TestGitContentConfigChanged(t *testing.T) { + tests := []struct { + name string + obj sourcev1.GitRepository + artifacts []*meta.Artifact + want bool + }{ + { + name: "no content config", + want: false, + }, + { + name: "unobserved ignore", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{Ignore: ptr.To("foo")}, + }, + want: true, + }, + { + name: "observed ignore", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{Ignore: ptr.To("foo")}, + Status: sourcev1.GitRepositoryStatus{ObservedIgnore: ptr.To("foo")}, + }, + want: false, + }, + { + name: "unobserved recurse submodules", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{RecurseSubmodules: true}, + }, + want: true, + }, + { + name: "observed recurse submodules", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{RecurseSubmodules: true}, + Status: sourcev1.GitRepositoryStatus{ObservedRecurseSubmodules: true}, + }, + want: false, + }, + { + name: "unobserved sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c"}}, + }, + want: true, + }, + { + name: "unobserved case sensitive sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/Z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: true, + }, + { + name: "observed sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: false, + }, + { + name: "observed sparse checkout with leading slash", + obj: sourcev1.GitRepository{ + Spec: 
sourcev1.GitRepositorySpec{SparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + }, + want: false, + }, + { + name: "unobserved include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, FromPath: "bar", ToPath: "baz"}, + }, + }, + }, + want: true, + }, + { + name: "observed include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: false, + }, + { + name: "observed include but different artifact revision", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "ccc", Digest: "bbb"}, + }, + want: true, + }, + { + name: "observed include but different artifact digest", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: 
"foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "ddd"}, + }, + want: true, + }, + { + name: "observed include but updated spec", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: true, + }, + { + name: "different number of include and observed include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ddd"}, + }, + want: true, + }, + { + name: "different number of include and artifactset", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: 
meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: true, + }, + { + name: "different number of include and included artifacts", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + includes := artifactSet(tt.artifacts) + g.Expect(gitContentConfigChanged(&tt.obj, &includes)).To(Equal(tt.want)) + }) + } +} + +func Test_requiresVerification(t *testing.T) { + tests := []struct { + name string + obj *sourcev1.GitRepository + want bool + }{ 
+ { + name: "GitRepository without verification does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{}, + }, + want: false, + }, + { + name: "GitRepository with verification and no observed verification mode in status requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{}, + }, + }, + want: true, + }, + { + name: "GitRepository with HEAD verification and a verified tag requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag), + }, + }, + want: true, + }, + { + name: "GitRepository with tag and HEAD verification and a verified tag requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag), + }, + }, + want: true, + }, + { + name: "GitRepository with tag verification and a verified HEAD requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD), + }, + }, + want: true, + }, + { + name: "GitRepository with tag and HEAD verification and a verified HEAD requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: 
ptrToVerificationMode(sourcev1.ModeGitHEAD), + }, + }, + want: true, + }, + { + name: "GitRepository with tag verification and a verified HEAD and tag does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD), + }, + }, + want: false, + }, + { + name: "GitRepository with head verification and a verified HEAD and tag does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD), + }, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + verificationRequired := requiresVerification(tt.obj) + g.Expect(verificationRequired).To(Equal(tt.want)) + }) + } +} + +func ptrToVerificationMode(mode sourcev1.GitVerificationMode) *sourcev1.GitVerificationMode { + return &mode +} diff --git a/controllers/helmchart_controller.go b/internal/controller/helmchart_controller.go similarity index 57% rename from controllers/helmchart_controller.go rename to internal/controller/helmchart_controller.go index 032f678bb..e969bf67a 100644 --- a/controllers/helmchart_controller.go +++ b/internal/controller/helmchart_controller.go @@ -14,11 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package controller import ( "context" "crypto/tls" + "encoding/json" "errors" "fmt" "net/url" @@ -28,16 +29,21 @@ import ( "strings" "time" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/opencontainers/go-digest" + "github.com/sigstore/cosign/v2/pkg/cosign" helmgetter "helm.sh/helm/v3/pkg/getter" helmreg "helm.sh/helm/v3/pkg/registry" + helmrepo "helm.sh/helm/v3/pkg/repo" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/uuid" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -45,25 +51,29 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" + "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/pkg/untar" + rreconcile 
"github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/tar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/cache" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" - "github.com/fluxcd/source-controller/internal/helm/registry" "github.com/fluxcd/source-controller/internal/helm/repository" + soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/util" @@ -79,6 +89,7 @@ var helmChartReadyCondition = summarize.Conditions{ sourcev1.BuildFailedCondition, sourcev1.ArtifactOutdatedCondition, sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, meta.ReadyCondition, meta.ReconcilingCondition, meta.StalledCondition, @@ -89,6 +100,7 @@ var helmChartReadyCondition = summarize.Conditions{ sourcev1.BuildFailedCondition, sourcev1.ArtifactOutdatedCondition, sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, meta.StalledCondition, meta.ReconcilingCondition, }, @@ -121,35 +133,44 @@ type HelmChartReconciler struct { helper.Metrics RegistryClientGenerator RegistryClientGeneratorFunc - Storage *Storage + Storage *storage.Storage Getters helmgetter.Providers ControllerName string Cache *cache.Cache TTL time.Duration *cache.CacheRecorder + + patchOptions []patch.Option 
} -func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{}) +// RegistryClientGeneratorFunc is a function that returns a registry client +// and an optional file name. +// The file is used to store the registry client credentials. +// The caller is responsible for deleting the file. +type RegistryClientGeneratorFunc func(tlsConfig *tls.Config, isLogin, insecure bool) (*helmreg.Client, string, error) + +func (r *HelmChartReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(ctx, mgr, HelmChartReconcilerOptions{}) } type HelmChartReconcilerOptions struct { - MaxConcurrentReconciles int - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } -// helmChartReconcileFunc is the function type for all the v1beta2.HelmChart +// helmChartReconcileFunc is the function type for all the v1.HelmChart // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. 
-type helmChartReconcileFunc func(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) +type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) -func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { - if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, +func (r *HelmChartReconciler) SetupWithManagerAndOptions(ctx context.Context, mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { + r.patchOptions = getPatchOptions(helmChartReadyCondition.Owned, r.ControllerName) + + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, r.indexHelmRepositoryByURL); err != nil { return fmt.Errorf("failed setting index fields: %w", err) } - if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmChart{}, sourcev1.SourceIndexKey, + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmChart{}, sourcev1.SourceIndexKey, r.indexHelmChartBySource); err != nil { return fmt.Errorf("failed setting index fields: %w", err) } @@ -159,37 +180,29 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), )). Watches( - &source.Kind{Type: &sourcev1.HelmRepository{}}, + &sourcev1.HelmRepository{}, handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). Watches( - &source.Kind{Type: &sourcev1.GitRepository{}}, + &sourcev1.GitRepository{}, handler.EnqueueRequestsFromMapFunc(r.requestsForGitRepositoryChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). 
Watches( - &source.Kind{Type: &sourcev1.Bucket{}}, + &sourcev1.Bucket{}, handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, + RateLimiter: opts.RateLimiter, }). Complete(r) } func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() - log := ctrl.LoggerFrom(ctx). - // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. - ctx = ctrl.LoggerInto(ctx, log) + log := ctrl.LoggerFrom(ctx) // Fetch the HelmChart obj := &sourcev1.HelmChart{} @@ -197,20 +210,8 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, client.IgnoreNotFound(err) } - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - // Initialize the patch helper with the current version of the object. - patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err - } + serialPatcher := patch.NewSerialPatcher(obj, r.Client) // recResult stores the abstracted reconcile result. var recResult sreconcile.Result @@ -218,37 +219,48 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Always attempt to patch the object after each reconciliation. // NOTE: The final runtime result and error are set in this block. 
defer func() { - summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) summarizeOpts := []summarize.Option{ summarize.WithConditions(helmChartReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), summarize.WithReconcileResult(recResult), summarize.WithReconcileError(retErr), summarize.WithIgnoreNotFound(), summarize.WithProcessors( - summarize.RecordContextualError, + summarize.ErrorActionHandler, summarize.RecordReconcileReq, ), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + // Add finalizer first if not exist to avoid the race condition - // between init and delete + // between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue return } - // Examine if the object is under deletion - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - recResult, retErr = r.reconcileDelete(ctx, obj) + // Return if the object is suspended. 
+ if obj.Spec.Suspend { + log.Info("Reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil return } @@ -258,19 +270,35 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( r.reconcileSource, r.reconcileArtifact, } - recResult, retErr = r.reconcile(ctx, obj, reconcilers) + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) return } // reconcile iterates through the helmChartReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. -func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() - // Mark as reconciling if generation differs. - if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var reconcileAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + reconcileAtVal = v + } + + // Persist reconciling if generation differs or reconciliation is requested. 
+ switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } } // Run the sub-reconcilers and build the result of reconciliation. @@ -280,7 +308,7 @@ func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmC resErr error ) for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &build) + recResult, err := rec(ctx, sp, obj, &build) // Exit immediately on ResultRequeue. if recResult == sreconcile.ResultRequeue { return sreconcile.ResultRequeue, nil @@ -307,17 +335,12 @@ func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *source // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { annotations := map[string]string{ - sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision, - sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum, - } - - var oldChecksum string - if oldObj.GetArtifact() != nil { - oldChecksum = oldObj.GetArtifact().Checksum + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, } // Notify on new artifact and failure recovery. 
- if oldChecksum != newObj.GetArtifact().Checksum { + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, reasonForBuild(build), build.Summary()) ctrl.LoggerFrom(ctx).Info(build.Summary()) @@ -343,22 +366,49 @@ func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *source // condition is added. // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. -func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) - // Determine if the advertised artifact is still in storage - if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { - obj.Status.Artifact = nil - obj.Status.URL = "" - // Remove the condition as the artifact doesn't exist. 
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } } // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } return sreconcile.ResultSuccess, nil } @@ -370,23 +420,29 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, obj *sourcev return sreconcile.ResultSuccess, nil } -func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { 
+ // Remove any failed verification condition. + // The reason is that a failing verification should be recalculated. + if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } + // Retrieve the source s, err := r.getSource(ctx, obj) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to get source: %w", err), - Reason: "SourceUnavailable", - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to get source: %w", err), + "SourceUnavailable", + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) // Return Kubernetes client errors, but ignore others which can only be // solved by a change in generation if apierrs.ReasonForError(err) == metav1.StatusReasonUnknown { - return sreconcile.ResultEmpty, &serror.Stalling{ - Err: fmt.Errorf("failed to get source: %w", err), - Reason: "UnsupportedSourceKind", - } + return sreconcile.ResultEmpty, serror.NewStalling( + fmt.Errorf("failed to get source: %w", err), + "UnsupportedSourceKind", + ) } return sreconcile.ResultEmpty, e } @@ -397,7 +453,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 if helmRepo, ok := s.(*sourcev1.HelmRepository); !ok || helmRepo.Spec.Type != sourcev1.HelmRepositoryTypeOCI { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) - r.eventLogf(ctx, obj, events.EventTypeTrace, "NoSourceArtifact", + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "NoSourceArtifact", "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) return sreconcile.ResultRequeue, nil } @@ -411,7 +467,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // Defer observation of build result defer func() { // Record both success and 
error observations on the object - observeChartBuild(obj, build, retErr) + observeChartBuild(ctx, sp, r.patchOptions, obj, build, retErr) // If we actually build a chart, take a historical note of any dependencies we resolved. // The reason this is a done conditionally, is because if we have a cached one in storage, @@ -419,21 +475,21 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 // a sudden (partial) disappearance of observed state. // TODO(hidde): include specific name/version information? if depNum := build.ResolvedDependencies; build.Complete() && depNum > 0 { - r.Eventf(obj, events.EventTypeTrace, "ResolvedDependencies", "resolved %d chart dependencies", depNum) + r.Eventf(obj, eventv1.EventTypeTrace, "ResolvedDependencies", "resolved %d chart dependencies", depNum) } // Handle any build error if retErr != nil { if buildErr := new(chart.BuildError); errors.As(retErr, &buildErr) { - retErr = &serror.Event{ - Err: buildErr, - Reason: buildErr.Reason.Reason, - } + retErr = serror.NewGeneric( + buildErr, + buildErr.Reason.Reason, + ) if chart.IsPersistentBuildErrorReason(buildErr.Reason) { - retErr = &serror.Stalling{ - Err: buildErr, - Reason: buildErr.Reason.Reason, - } + retErr = serror.NewStalling( + buildErr, + buildErr.Reason.Reason, + ) } } } @@ -453,74 +509,41 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1 } // buildFromHelmRepository attempts to pull and/or package a Helm chart with -// the specified data from the v1beta2.HelmRepository and v1beta2.HelmChart +// the specified data from the v1.HelmRepository and v1.HelmChart // objects. -// In case of a failure it records v1beta2.FetchFailedCondition on the chart +// In case of a failure it records v1.FetchFailedCondition on the chart // object, and returns early. 
func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { - var ( - tlsConfig *tls.Config - loginOpts []helmreg.LoginOption - ) + // Used to login with the repository declared provider + ctxTimeout, cancel := context.WithTimeout(ctx, repo.GetTimeout()) + defer cancel() - normalizedURL := repository.NormalizeURL(repo.Spec.URL) - // Construct the Getter options from the HelmRepository data - clientOpts := []helmgetter.Option{ - helmgetter.WithURL(normalizedURL), - helmgetter.WithTimeout(repo.Spec.Timeout.Duration), - helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), + normalizedURL, err := repository.NormalizeURL(repo.Spec.URL) + if err != nil { + return chartRepoConfigErrorReturn(err, obj) } - if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to get secret '%s': %w", repo.Spec.SecretRef.Name, err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Return error as the world as observed may change - return sreconcile.ResultEmpty, e - } - // Build client options from secret - opts, err := getter.ClientOptionsFromSecret(*secret) - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Requeue as content of secret might change - return sreconcile.ResultEmpty, e - } - clientOpts = append(clientOpts, opts...) 
- - tlsConfig, err = getter.TLSClientConfigFromSecret(*secret, normalizedURL) - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create TLS client config with secret data: %w", err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Requeue as content of secret might change - return sreconcile.ResultEmpty, e - } - - // Build registryClient options from secret - loginOpt, err := registry.LoginOptionFromSecret(normalizedURL, *secret) - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), - Reason: sourcev1.AuthenticationFailedReason, + clientOpts, certsTmpDir, err := getter.GetClientOpts(ctxTimeout, r.Client, repo, normalizedURL) + if err != nil && !errors.Is(err, getter.ErrDeprecatedTLSConfig) { + e := serror.NewGeneric( + err, + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + if certsTmpDir != "" { + defer func() { + if err := os.RemoveAll(certsTmpDir); err != nil { + r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason, + "failed to delete temporary certificates directory: %s", err) } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Requeue as content of secret might change - return sreconcile.ResultEmpty, e - } - - loginOpts = append([]helmreg.LoginOption{}, loginOpt) + }() } + getterOpts := clientOpts.GetterOpts + // Initialize the chart repository var chartRepo repository.Downloader switch repo.Spec.Type { @@ -534,13 +557,13 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * // this is needed because otherwise the credentials are stored in ~/.docker/config.json. 
// TODO@souleb: remove this once the registry move to Oras v2 // or rework to enable reusing credentials to avoid the unneccessary handshake operations - registryClient, credentialsFile, err := r.RegistryClientGenerator(loginOpts != nil) + registryClient, credentialsFile, err := r.RegistryClientGenerator(clientOpts.TlsConfig, clientOpts.MustLoginToRegistry(), repo.Spec.Insecure) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to construct Helm client: %w", err), - Reason: meta.FailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to construct Helm client: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -553,67 +576,103 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * }() } + var verifiers []soci.Verifier + if obj.Spec.Verify != nil { + provider := obj.Spec.Verify.Provider + verifiers, err = r.makeVerifiers(ctx, obj, *clientOpts) + if err != nil { + if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { + provider = fmt.Sprintf("%s keyless", provider) + } + e := serror.NewGeneric( + fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err), + sourcev1.VerificationError, + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + // Tell the chart repository to use the OCI client with the configured getter - clientOpts = append(clientOpts, helmgetter.WithRegistryClient(registryClient)) - ociChartRepo, err := repository.NewOCIChartRepository(normalizedURL, repository.WithOCIGetter(r.Getters), repository.WithOCIGetterOptions(clientOpts), repository.WithOCIRegistryClient(registryClient)) + getterOpts = append(getterOpts, helmgetter.WithRegistryClient(registryClient)) + chartRepoOpts := 
[]repository.OCIChartRepositoryOption{ + repository.WithOCIGetter(r.Getters), + repository.WithOCIGetterOptions(getterOpts), + repository.WithOCIRegistryClient(registryClient), + repository.WithVerifiers(verifiers), + } + if repo.Spec.Insecure { + chartRepoOpts = append(chartRepoOpts, repository.WithInsecureHTTP()) + } + + ociChartRepo, err := repository.NewOCIChartRepository(normalizedURL, chartRepoOpts...) if err != nil { return chartRepoConfigErrorReturn(err, obj) } - chartRepo = ociChartRepo // If login options are configured, use them to login to the registry // The OCIGetter will later retrieve the stored credentials to pull the chart - if loginOpts != nil { - err = ociChartRepo.Login(loginOpts...) + if clientOpts.MustLoginToRegistry() { + err = ociChartRepo.Login(clientOpts.RegLoginOpts...) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to login to OCI registry: %w", err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to login to OCI registry: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } + chartRepo = ociChartRepo default: - httpChartRepo, err := repository.NewChartRepository(normalizedURL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, tlsConfig, clientOpts, - repository.WithMemoryCache(r.Storage.LocalPath(*repo.GetArtifact()), r.Cache, r.TTL, func(event string) { - r.IncCacheEvents(event, obj.Name, obj.Namespace) - })) + httpChartRepo, err := repository.NewChartRepository(normalizedURL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, clientOpts.TlsConfig, getterOpts...) 
if err != nil { return chartRepoConfigErrorReturn(err, obj) } - chartRepo = httpChartRepo + + // NB: this needs to be deferred first, as otherwise the Index will disappear + // before we had a chance to cache it. defer func() { - if httpChartRepo == nil { - return - } - // Cache the index if it was successfully retrieved - // and the chart was successfully built - if r.Cache != nil && httpChartRepo.Index != nil { - // The cache key have to be safe in multi-tenancy environments, - // as otherwise it could be used as a vector to bypass the helm repository's authentication. - // Using r.Storage.LocalPath(*repo.GetArtifact() is safe as the path is in the format ///. - err := httpChartRepo.CacheIndexInMemory() - if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) - } + if err := httpChartRepo.Clear(); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to clear Helm repository index") } + }() - // Delete the index reference - if httpChartRepo.Index != nil { - httpChartRepo.Unload() + // Attempt to load the index from the cache. + if r.Cache != nil { + if index, ok := r.Cache.Get(repo.GetArtifact().Path); ok { + r.IncCacheEvents(cache.CacheEventTypeHit, repo.Name, repo.Namespace) + r.Cache.SetExpiration(repo.GetArtifact().Path, r.TTL) + httpChartRepo.Index = index.(*helmrepo.IndexFile) + } else { + r.IncCacheEvents(cache.CacheEventTypeMiss, repo.Name, repo.Namespace) + defer func() { + // If we succeed in loading the index, cache it. 
+ if httpChartRepo.Index != nil { + if err = r.Cache.Set(repo.GetArtifact().Path, httpChartRepo.Index, r.TTL); err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) + } + } + }() } - }() + } + chartRepo = httpChartRepo } // Construct the chart builder with scoped configuration cb := chart.NewRemoteBuilder(chartRepo) opts := chart.BuildOptions{ - ValuesFiles: obj.GetValuesFiles(), - Force: obj.Generation != obj.Status.ObservedGeneration, + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, + // The remote builder will not attempt to download the chart if + // an artifact exists with the same name and version and `Force` is false. + // It will however try to verify the chart if `obj.Spec.Verify` is set, at every reconciliation. + Verify: obj.Spec.Verify != nil && obj.Spec.Verify.Provider != "", } if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles } // Set the VersionMetadata to the object's Generation if ValuesFiles is defined @@ -634,19 +693,19 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * } // buildFromTarballArtifact attempts to pull and/or package a Helm chart with -// the specified data from the v1beta2.HelmChart object and the given -// v1beta2.Artifact. -// In case of a failure it records v1beta2.FetchFailedCondition on the chart +// the specified data from the v1.HelmChart object and the given +// v1.Artifact. +// In case of a failure it records v1.FetchFailedCondition on the chart // object, and returns early. 
-func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source meta.Artifact, b *chart.Build) (sreconcile.Result, error) { // Create temporary working directory tmpDir, err := util.TempDirForObj("", obj) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create temporary working directory: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create temporary working directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer os.RemoveAll(tmpDir) @@ -654,36 +713,36 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj // Create directory to untar source into sourceDir := filepath.Join(tmpDir, "source") if err := os.Mkdir(sourceDir, 0o700); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create directory to untar source into: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create directory to untar source into: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Open the tarball artifact file and untar files into working directory f, err := os.Open(r.Storage.LocalPath(source)) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to open source artifact: %w", err), - Reason: sourcev1.ReadOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to open source artifact: %w", err), + sourcev1.ReadOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - if _, err = untar.Untar(f, sourceDir); err != nil { + if err = tar.Untar(f, sourceDir, tar.WithMaxUntarSize(-1)); err != nil { _ = f.Close() - return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("artifact untar error: %w", err), - Reason: meta.FailedReason, - } + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("artifact untar error: %w", err), + meta.FailedReason, + ) } if err = f.Close(); err != nil { - return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("artifact close error: %w", err), - Reason: meta.FailedReason, - } + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("artifact close error: %w", err), + meta.FailedReason, + ) } // Setup dependency manager @@ -700,21 +759,25 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj // Configure builder options, including any previously cached chart opts := chart.BuildOptions{ - ValuesFiles: obj.GetValuesFiles(), - Force: obj.Generation != obj.Status.ObservedGeneration, + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := obj.Status.Artifact; artifact != nil { + if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles } // Configure revision metadata for chart build if we should react to revision changes if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { rev := source.Revision if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { - // Split the reference by the `/` delimiter which may be present, - // and take the last entry 
which contains the SHA. - split := strings.Split(source.Revision, "/") - rev = split[len(split)-1] + rev = git.ExtractHashFromRevision(rev).String() + } + if obj.Spec.SourceRef.Kind == sourcev1.BucketKind { + if dig := digest.Digest(rev); dig.Validate() == nil { + rev = dig.Encoded() + } } if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { // The SemVer from the metadata is at times used in e.g. the label metadata for a resource @@ -758,12 +821,12 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. 
-func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { // Without a complete chart build, there is little to reconcile if !b.Complete() { return sreconcile.ResultRequeue, nil @@ -773,7 +836,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *source defer func() { if obj.Status.ObservedChartName == b.Name && obj.GetArtifact().HasRevision(b.Version) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, reasonForBuild(b), b.Summary()) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, reasonForBuild(b), "%s", b.Summary()) } }() @@ -782,7 +845,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *source // Return early if the build path equals the current artifact path if curArtifact := obj.GetArtifact(); curArtifact != nil && r.Storage.LocalPath(*curArtifact) == b.Path { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) return sreconcile.ResultSuccess, nil } @@ -791,42 +854,47 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *source // Ensure artifact directory exists and acquire lock if err := r.Storage.MkdirAll(artifact); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create artifact directory: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create 
artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), - Reason: sourcev1.AcquireLockFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + sourcev1.AcquireLockFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer unlock() // Copy the packaged chart to the artifact path if err = r.Storage.CopyFromPath(&artifact, b.Path); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("unable to copy Helm chart to storage: %w", err), - Reason: sourcev1.ArchiveOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("unable to copy Helm chart to storage: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Record it on the object obj.Status.Artifact = artifact.DeepCopy() obj.Status.ObservedChartName = b.Name + if obj.Spec.IgnoreMissingValuesFiles { + obj.Status.ObservedValuesFiles = b.ValuesFiles + } else { + obj.Status.ObservedValuesFiles = nil + } // Update symlink on a "best effort" basis symURL, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, "failed to update status URL symlink: %s", err) } if symURL != "" { @@ -895,12 +963,12 @@ func (r 
*HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1 func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) } else if deleted != "" { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") } obj.Status.Artifact = nil @@ -909,14 +977,14 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1. if obj.GetArtifact() != nil { delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) if err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection of artifacts failed: %w", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) } if len(delFiles) > 0 { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -929,69 +997,51 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1. // The callback returns an object with a state, so the caller has to do the necessary cleanup. 
func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Context, name, namespace string) chart.GetChartDownloaderCallback { return func(url string) (repository.Downloader, error) { - var ( - tlsConfig *tls.Config - loginOpts []helmreg.LoginOption - ) - normalizedURL := repository.NormalizeURL(url) - repo, err := r.resolveDependencyRepository(ctx, url, namespace) + normalizedURL, err := repository.NormalizeURL(url) + if err != nil { + return nil, err + } + obj, err := r.resolveDependencyRepository(ctx, url, namespace) if err != nil { // Return Kubernetes client errors, but ignore others if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown { return nil, err } - repo = &sourcev1.HelmRepository{ + obj = &sourcev1.HelmRepository{ Spec: sourcev1.HelmRepositorySpec{ URL: url, Timeout: &metav1.Duration{Duration: 60 * time.Second}, }, } } - clientOpts := []helmgetter.Option{ - helmgetter.WithURL(normalizedURL), - helmgetter.WithTimeout(repo.Spec.Timeout.Duration), - helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), - } - if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { - if err != nil { - return nil, err - } - opts, err := getter.ClientOptionsFromSecret(*secret) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, opts...) 
- tlsConfig, err = getter.TLSClientConfigFromSecret(*secret, normalizedURL) - if err != nil { - return nil, fmt.Errorf("failed to create TLS client config for HelmRepository '%s': %w", repo.Name, err) - } + // Used to login with the repository declared provider + ctxTimeout, cancel := context.WithTimeout(ctx, obj.GetTimeout()) + defer cancel() - // Build registryClient options from secret - loginOpt, err := registry.LoginOptionFromSecret(normalizedURL, *secret) - if err != nil { - return nil, fmt.Errorf("failed to create login options for HelmRepository '%s': %w", repo.Name, err) - } - - loginOpts = append([]helmreg.LoginOption{}, loginOpt) + clientOpts, certsTmpDir, err := getter.GetClientOpts(ctxTimeout, r.Client, obj, normalizedURL) + if err != nil && !errors.Is(err, getter.ErrDeprecatedTLSConfig) { + return nil, err } + getterOpts := clientOpts.GetterOpts var chartRepo repository.Downloader if helmreg.IsOCI(normalizedURL) { - registryClient, credentialsFile, err := r.RegistryClientGenerator(loginOpts != nil) + registryClient, credentialsFile, err := r.RegistryClientGenerator(clientOpts.TlsConfig, clientOpts.MustLoginToRegistry(), obj.Spec.Insecure) if err != nil { - return nil, fmt.Errorf("failed to create registry client for HelmRepository '%s': %w", repo.Name, err) + return nil, fmt.Errorf("failed to create registry client: %w", err) } var errs []error // Tell the chart repository to use the OCI client with the configured getter - clientOpts = append(clientOpts, helmgetter.WithRegistryClient(registryClient)) + getterOpts = append(getterOpts, helmgetter.WithRegistryClient(registryClient)) ociChartRepo, err := repository.NewOCIChartRepository(normalizedURL, repository.WithOCIGetter(r.Getters), - repository.WithOCIGetterOptions(clientOpts), + repository.WithOCIGetterOptions(getterOpts), repository.WithOCIRegistryClient(registryClient), + repository.WithCertificatesStore(certsTmpDir), repository.WithCredentialsFile(credentialsFile)) if err != nil { - errs = 
append(errs, fmt.Errorf("failed to create OCI chart repository for HelmRepository '%s': %w", repo.Name, err)) + errs = append(errs, fmt.Errorf("failed to create OCI chart repository: %w", err)) // clean up the credentialsFile if credentialsFile != "" { if err := os.Remove(credentialsFile); err != nil { @@ -1003,10 +1053,10 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont // If login options are configured, use them to login to the registry // The OCIGetter will later retrieve the stored credentials to pull the chart - if loginOpts != nil { - err = ociChartRepo.Login(loginOpts...) + if clientOpts.MustLoginToRegistry() { + err = ociChartRepo.Login(clientOpts.RegLoginOpts...) if err != nil { - errs = append(errs, fmt.Errorf("failed to login to OCI chart repository for HelmRepository '%s': %w", repo.Name, err)) + errs = append(errs, fmt.Errorf("failed to login to OCI chart repository: %w", err)) // clean up the credentialsFile errs = append(errs, ociChartRepo.Clear()) return nil, kerrors.NewAggregate(errs) @@ -1015,19 +1065,28 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont chartRepo = ociChartRepo } else { - httpChartRepo, err := repository.NewChartRepository(normalizedURL, "", r.Getters, tlsConfig, clientOpts) + httpChartRepo, err := repository.NewChartRepository(normalizedURL, "", r.Getters, clientOpts.TlsConfig, getterOpts...) if err != nil { return nil, err } - // Ensure that the cache key is the same as the artifact path - // otherwise don't enable caching. We don't want to cache indexes - // for repositories that are not reconciled by the source controller. 
- if repo.Status.Artifact != nil { - httpChartRepo.CachePath = r.Storage.LocalPath(*repo.GetArtifact()) - httpChartRepo.SetMemCache(r.Storage.LocalPath(*repo.GetArtifact()), r.Cache, r.TTL, func(event string) { - r.IncCacheEvents(event, name, namespace) - }) + if artifact := obj.GetArtifact(); artifact != nil { + httpChartRepo.Path = r.Storage.LocalPath(*artifact) + + // Attempt to load the index from the cache. + if r.Cache != nil { + if index, ok := r.Cache.Get(artifact.Path); ok { + r.IncCacheEvents(cache.CacheEventTypeHit, name, namespace) + r.Cache.SetExpiration(artifact.Path, r.TTL) + httpChartRepo.Index = index.(*helmrepo.IndexFile) + } else { + r.IncCacheEvents(cache.CacheEventTypeMiss, name, namespace) + if err := httpChartRepo.LoadFromPath(); err != nil { + return nil, err + } + r.Cache.Set(artifact.Path, httpChartRepo.Index, r.TTL) + } + } } chartRepo = httpChartRepo @@ -1054,29 +1113,13 @@ func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, u return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) } -func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { - if repository.Spec.SecretRef == nil { - return nil, nil - } - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - return nil, err - } - return &secret, nil -} - func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { repo, ok := o.(*sourcev1.HelmRepository) if !ok { panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) } - u := repository.NormalizeURL(repo.Spec.URL) - if u != "" { + u, err := repository.NormalizeURL(repo.Spec.URL) + if u != "" && err == nil { return []string{u} } return nil @@ -1090,37 +1133,41 @@ func (r *HelmChartReconciler) indexHelmChartBySource(o 
client.Object) []string { return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)} } -func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request { +func (r *HelmChartReconciler) requestsForHelmRepositoryChange(ctx context.Context, o client.Object) []reconcile.Request { repo, ok := o.(*sourcev1.HelmRepository) if !ok { - panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) + ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a HelmRepository, got %T", o), "failed to get requests for HelmRepository change") + return nil } + // If we do not have an artifact, we have no requests to make if repo.GetArtifact() == nil { return nil } - ctx := context.Background() var list sourcev1.HelmChartList if err := r.List(ctx, &list, client.MatchingFields{ sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name), }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for HelmRepository change") return nil } var reqs []reconcile.Request - for _, i := range list.Items { - if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + for i, v := range list.Items { + if v.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) } } return reqs } -func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) []reconcile.Request { +func (r *HelmChartReconciler) requestsForGitRepositoryChange(ctx context.Context, o client.Object) []reconcile.Request { repo, ok := o.(*sourcev1.GitRepository) if !ok { - panic(fmt.Sprintf("Expected a GitRepository, got %T", o)) + ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a GitRepository, got %T", o), + "failed to get reconcile requests for GitRepository change") + return nil } // If we do not have an 
artifact, we have no requests to make @@ -1129,25 +1176,28 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) [] } var list sourcev1.HelmChartList - if err := r.List(context.TODO(), &list, client.MatchingFields{ + if err := r.List(ctx, &list, client.MatchingFields{ sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name), }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for GitRepository change") return nil } var reqs []reconcile.Request - for _, i := range list.Items { - if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + for i, v := range list.Items { + if !repo.GetArtifact().HasRevision(v.Status.ObservedSourceArtifactRevision) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) } } return reqs } -func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconcile.Request { +func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o client.Object) []reconcile.Request { bucket, ok := o.(*sourcev1.Bucket) if !ok { - panic(fmt.Sprintf("Expected a Bucket, got %T", o)) + ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a Bucket, got %T", o), + "failed to get reconcile requests for Bucket change") + return nil } // If we do not have an artifact, we have no requests to make @@ -1156,16 +1206,17 @@ func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconci } var list sourcev1.HelmChartList - if err := r.List(context.TODO(), &list, client.MatchingFields{ + if err := r.List(ctx, &list, client.MatchingFields{ sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name), }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for Bucket change") return nil } var reqs []reconcile.Request - for _, i := range list.Items { - if 
i.Status.ObservedSourceArtifactRevision != bucket.GetArtifact().Revision { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + for i, v := range list.Items { + if !bucket.GetArtifact().HasRevision(v.Status.ObservedSourceArtifactRevision) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) } } return reqs @@ -1188,16 +1239,29 @@ func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, } // observeChartBuild records the observation on the given given build and error on the object. -func observeChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { +func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *sourcev1.HelmChart, build *chart.Build, err error) { if build.HasMetadata() { if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", build.Summary()) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", "%s", build.Summary()) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", build.Summary()) + if err := sp.Patch(ctx, obj, pOpts...); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to patch") + } } } if build.Complete() { conditions.Delete(obj, sourcev1.FetchFailedCondition) conditions.Delete(obj, sourcev1.BuildFailedCondition) + if build.VerifiedResult == soci.VerificationResultSuccess { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version %s", build.Version) + } + } + + if obj.Spec.Verify == nil { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) } if err != nil { @@ -1212,10 +1276,14 @@ func observeChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { switch buildErr.Reason { case 
chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: conditions.Delete(obj, sourcev1.FetchFailedCondition) - conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr) + case chart.ErrChartVerification: + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "%s", buildErr) default: conditions.Delete(obj, sourcev1.BuildFailedCondition) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, "%s", buildErr) } return } @@ -1234,18 +1302,156 @@ func reasonForBuild(build *chart.Build) string { func chartRepoConfigErrorReturn(err error, obj *sourcev1.HelmChart) (sreconcile.Result, error) { switch err.(type) { case *url.Error: - e := &serror.Stalling{ - Err: fmt.Errorf("invalid Helm repository URL: %w", err), - Reason: sourcev1.URLInvalidReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e default: - e := &serror.Stalling{ - Err: fmt.Errorf("failed to construct Helm client: %w", err), - Reason: meta.FailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewStalling( + fmt.Errorf("failed to construct Helm client: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } 
} + +// makeVerifiers returns a list of verifiers for the given chart. +func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *sourcev1.HelmChart, clientOpts getter.ClientOpts) ([]soci.Verifier, error) { + var verifiers []soci.Verifier + verifyOpts := []remote.Option{} + + if clientOpts.Authenticator != nil { + verifyOpts = append(verifyOpts, remote.WithAuth(clientOpts.Authenticator)) + } else { + verifyOpts = append(verifyOpts, remote.WithAuthFromKeychain(clientOpts.Keychain)) + } + + switch obj.Spec.Verify.Provider { + case "cosign": + defaultCosignOciOpts := []scosign.Options{ + scosign.WithRemoteOptions(verifyOpts...), + } + + // get the public keys from the given secret + if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil { + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctx, verifySecret) + if err != nil { + return nil, err + } + + for k, data := range pubSecret.Data { + // search for public keys in the secret + if strings.HasSuffix(k, ".pub") { + verifier, err := scosign.NewCosignVerifier(ctx, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...) + if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + } + } + + if len(verifiers) == 0 { + return nil, fmt.Errorf("no public keys found in secret '%s'", verifySecret.String()) + } + return verifiers, nil + } + + // if no secret is provided, add a keyless verifier + var identities []cosign.Identity + for _, match := range obj.Spec.Verify.MatchOIDCIdentity { + identities = append(identities, cosign.Identity{ + IssuerRegExp: match.Issuer, + SubjectRegExp: match.Subject, + }) + } + defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities)) + + verifier, err := scosign.NewCosignVerifier(ctx, defaultCosignOciOpts...) 
+ if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + return verifiers, nil + case "notation": + // get the public keys from the given secret + secretRef := obj.Spec.Verify.SecretRef + + if secretRef == nil { + return nil, fmt.Errorf("verification secret cannot be empty: '%s'", obj.Name) + } + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctx, verifySecret) + if err != nil { + return nil, err + } + + data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey] + if !ok { + return nil, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String()) + } + + var doc trustpolicy.Document + + if err := json.Unmarshal(data, &doc); err != nil { + return nil, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err) + } + + var certs [][]byte + + for k, data := range pubSecret.Data { + if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") { + certs = append(certs, data) + } + } + + if certs == nil { + return nil, fmt.Errorf("no certificates found in secret '%s'", verifySecret.String()) + } + + trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx)) + defaultNotationOciOpts := []notation.Options{ + notation.WithTrustPolicy(trustPolicy), + notation.WithRemoteOptions(verifyOpts...), + notation.WithAuth(clientOpts.Authenticator), + notation.WithKeychain(clientOpts.Keychain), + notation.WithInsecureRegistry(clientOpts.Insecure), + notation.WithLogger(ctrl.LoggerFrom(ctx)), + notation.WithRootCertificates(certs), + } + + verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...) 
+ if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + return verifiers, nil + default: + return nil, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider) + } +} + +// retrieveSecret retrieves a secret from the specified namespace with the given secret name. +// It returns the retrieved secret and any error encountered during the retrieval process. +func (r *HelmChartReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) { + + var pubSecret corev1.Secret + + if err := r.Get(ctx, verifySecret, &pubSecret); err != nil { + return corev1.Secret{}, err + } + return pubSecret, nil +} diff --git a/internal/controller/helmchart_controller_test.go b/internal/controller/helmchart_controller_test.go new file mode 100644 index 000000000..190a9f8b5 --- /dev/null +++ b/internal/controller/helmchart_controller_test.go @@ -0,0 +1,3515 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/notaryproject/notation-core-go/signature/cose" + "github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + nr "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" + "github.com/sigstore/cosign/v2/pkg/cosign" + hchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + helmreg "helm.sh/helm/v3/pkg/registry" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + oras "oras.land/oras-go/v2/registry/remote" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck 
"github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/helm/chart" + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/registry" + "github.com/fluxcd/source-controller/internal/oci" + snotation "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +func TestHelmChartReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "helmchart-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + helmchart := &sourcev1.HelmChart{} + helmchart.Name = "test-helmchart" + helmchart.Namespace = namespaceName + helmchart.Spec = sourcev1.HelmChartSpec{ + Interval: metav1.Duration{Duration: interval}, + Chart: "foo", + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: "HelmRepository", + Name: "bar", + }, + } + // Add a test finalizer to prevent the object from getting deleted. + helmchart.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, helmchart)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. 
+ g.Expect(k8sClient.Delete(ctx, helmchart)).NotTo(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(helmchart)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestHelmChartReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + chartPath = "testdata/charts/helmchart" + ) + + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + g.Expect(serverFactory.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed()) + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + tests := []struct { + name string + beforeFunc func(repository *sourcev1.HelmRepository) + assertFunc func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + }{ + { + name: "Reconciles chart build", + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + origObj := obj.DeepCopy() + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmChart to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. 
+ u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + // Check if the cache contains the index. + repoKey := client.ObjectKey{Name: repository.Name, Namespace: repository.Namespace} + err = testEnv.Get(ctx, repoKey, repository) + g.Expect(err).ToNot(HaveOccurred()) + _, found := testCache.Get(repository.GetArtifact().Path) + g.Expect(found).To(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. + // NOTE: Since the object is already created when received in + // this assertFunc, reset the ResourceVersion from the object + // before recreating it to avoid API server error. 
+ obj = origObj.DeepCopy() + obj.ResourceVersion = "" + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(repository *sourcev1.HelmRepository) { + repository.Spec.URL = "https://unsupported/foo://" // Invalid URL + }, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + // Wait for HelmChart to be FetchFailed == true + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { + return false + } + // observedGeneration is -1 because we have no successful reconciliation + return obj.Status.ObservedGeneration == -1 + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }, + }, + { + name: "Stalling on invalid oci repository URL", + beforeFunc: func(repository *sourcev1.HelmRepository) { + repository.Spec.URL = strings.Replace(repository.Spec.URL, "http", "oci", 1) + }, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + // Wait for HelmChart to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { + return false + } + // observedGeneration is -1 because we have no successful reconciliation + return 
obj.Status.ObservedGeneration == -1 + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer server.Stop() + + ns, err := testEnv.CreateNamespace(ctx, "helmchart") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + repository := sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(&repository) + } + + g.Expect(testEnv.CreateAndWait(ctx, &repository)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, &repository)).To(Succeed()) }() + + obj := sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmChartSpec{ + Chart: chartName, + Version: chartVersion, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, &obj)).To(Succeed()) + + if tt.assertFunc != nil { + tt.assertFunc(g, &obj, &repository) + } + }) + } +} + +func TestHelmChartReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart, storage 
*storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + 
storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = 
"sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + defer func() { + g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) + }() + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, nil) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmChartReconciler_reconcileSource(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + gitArtifact := &meta.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(st.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Source + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Observes Artifact revision and build result", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, 
meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Existing artifact makes AritfactOutdated=True", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + }, + { + name: "Error on unavailable source", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: 
"unavailable", + Kind: sourcev1.GitRepositoryKind, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + })) + }, + }, + { + name: "Stalling on unsupported source kind", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + Kind: "Unsupported", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + { + name: "Stalling on persistent build error", + source: &sourcev1.GitRepository{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Spec.ValuesFiles = []string{"invalid.yaml"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + { + name: "ResultRequeue when source artifact is unavailable", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{}, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.ObservedSourceArtifactRevision = "foo" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultRequeue, + assertFunc: func(g *WithT, build chart.Build, obj 
sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.source != nil { + clientBuilder.WithRuntimeObjects(tt.source) + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: st, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "chart", + Namespace: "default", + Generation: 1, + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(&obj) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), &obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), &obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(&obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, &obj, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b, obj) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, &obj) + }) + } +} + +func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { + g := NewWithT(t) + + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + higherChartVersion = "0.3.0" + chartPath = "testdata/charts/helmchart" + ) + + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + for _, ver := range []string{chartVersion, higherChartVersion} { + g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed()) + } + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + type options struct { + username string + password string + } + + tests := []struct { + name string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = "helmchart" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + server: options{ + username: "foo", + password: "bar", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte("foo"), + "password": []byte("bar"), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, 
repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Uses artifact as build cache with observedValuesFiles", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", 
"override.yaml"})) + }, + }, + { + name: "Sets Generation as VersionMetadata with values files", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Generation = 3 + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Missing values files are an error", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.ValuesFiles = []string{"missing.yaml"} + }, + wantErr: &chart.BuildError{Err: errors.New("values files merge error: failed to merge chart values: no values file found at path 'missing.yaml'")}, + }, + { + name: "All missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"missing.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Partial missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + 
obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml", "invalid.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "file://unsupported" // Unsupported protocol + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "://unsupported" // Invalid URL + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer server.Stop() + + if 
len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) + } + + testStorage, err := newTestStorage(server) + g.Expect(err).ToNot(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + Timeout: &metav1.Duration{Duration: timeout}, + }, + Status: sourcev1.HelmRepositoryStatus{ + Artifact: &meta.Artifact{ + Path: "index.yaml", + }, + }, + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmChartSpec{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } + }) + } +} + +func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { 
+ g := NewWithT(t) + + tmpDir := t.TempDir() + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).NotTo(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, testRegistryServer, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build with docker repository credentials", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(`{"auths":{"` + + testRegistryServer.registryHost + `":{"` + + `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + 
g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte(testRegistryUsername), + "password": []byte(testRegistryPassword), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository 
*sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "https://unsupported" // Unsupported protocol + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("failed to construct Helm client: invalid OCI registry URL: https://unsupported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmChartSpec{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) + + if tt.wantErr != nil { + g.Expect(err).To(HaveOccurred()) + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + 
g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + } + + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } + }) + } +} + +func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + chartsArtifact := &meta.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(st.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &meta.Artifact{ + Revision: "9876abcd", + Path: "values.yaml", + } + g.Expect(st.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: "cached.tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + source meta.Artifact + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Resolves chart dependencies and builds", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchartwithdeps" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchartwithdeps")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.ResolvedDependencies).To(Equal(4)) + g.Expect(build.Path).To(BeARegularFile()) + chart, err := secureloader.LoadFile(build.Path) + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chart.Metadata.Name).To(Equal("helmchartwithdeps")) + g.Expect(chart.Metadata.Version).To(Equal("0.1.0")) + g.Expect(chart.Dependencies()).To(HaveLen(4)) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ReconcileStrategyRevision sets VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ValuesFiles sets Generation as VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 3 + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ValuesFiles = []string{ + filepath.Join(obj.Spec.Chart, "values.yaml"), + filepath.Join(obj.Spec.Chart, "override.yaml"), + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+3")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{ + "testdata/charts/helmchart/values.yaml", + "testdata/charts/helmchart/override.yaml", + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Chart from 
storage cache", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + }, + { + name: "Chart from storage cache with ObservedValuesFiles", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + }, + { + name: "Generation change forces rebuild", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 2 + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedGeneration = 1 + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + 
g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Empty source artifact", + source: meta.Artifact{}, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("no such file or directory")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + { + name: "Invalid artifact type", + source: *yamlArtifact, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("artifact untar error: requires gzip-compressed body")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: st, + Getters: testGetters, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "artifact", + Namespace: "default", + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b) + if err != nil { + t.Log(err) + } + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b) + } + }) + } +} + +func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + build *chart.Build + beforeFunc func(obj *sourcev1.HelmChart) + want 
sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj *sourcev1.HelmChart) + }{ + { + name: "Incomplete build requeues and does not update status", + build: &chart.Build{}, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""), + }, + }, + { + name: "Copying artifact to storage from build makes ArtifactInStorage=True", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Up-to-date chart build does not persist artifact to storage", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{ + Path: "testdata/charts/helmchart-0.1.0.tgz", + } + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + 
t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + }, + { + name: "Restores conditions in case artifact matches current chart build", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + Packaged: true, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.ObservedChartName = "helmchart" + obj.Status.Artifact = &meta.Artifact{ + Revision: "0.1.0", + Path: "testdata/charts/helmchart-0.1.0.tgz", + } + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles with IgnoreMissingValuesFiles after 
creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + obj.Spec.ValuesFiles = []string{"values.yaml", "missing.yaml", "override.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). 
+ Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "reconcile-artifact-", + Generation: 1, + }, + Status: sourcev1.HelmChartStatus{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(ctx, sp, obj, tt.build) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} + +func TestHelmChartReconciler_getSource(t *testing.T) { + mocks := []client.Object{ + &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepository", + Namespace: "foo", + }, + }, + &sourcev1.GitRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.GitRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "foo", + }, + }, + &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Namespace: "foo", + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithStatusSubresource(&sourcev1.HelmChart{}). + WithObjects(mocks...) 
+ + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + tests := []struct { + name string + obj *sourcev1.HelmChart + want sourcev1.Source + wantErr bool + }{ + { + name: "Get HelmRepository source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[0].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[0].GetName(), + Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[0].(sourcev1.Source), + }, + { + name: "Get GitRepository source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[1].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[1].(sourcev1.Source), + }, + { + name: "Get Bucket source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[2].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[2].(sourcev1.Source), + }, + { + name: "Error on client error", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + wantErr: true, + }, + { + name: "Error on unsupported source kind", + obj: &sourcev1.HelmChart{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: "unsupported", + Kind: "Unsupported", + }, + }, + }, + wantErr: true, + }, + } + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := r.getSource(context.TODO(), tt.obj) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(BeNil()) + return + } + + // TODO(stefan): Remove this workaround when the controller-runtime fake client restores TypeMeta + // https://github.com/kubernetes-sigs/controller-runtime/issues/3302 + unstructuredGot, err := runtime.DefaultUnstructuredConverter.ToUnstructured(got) + g.Expect(err).ToNot(HaveOccurred()) + gotName, _, err := unstructured.NestedFieldCopy(unstructuredGot, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + unstructuredWant, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.want) + g.Expect(err).ToNot(HaveOccurred()) + wantName, _, err := unstructured.NestedFieldCopy(unstructuredWant, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(gotName).To(Equal(wantName)) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestHelmChartReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.HelmChartStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { + // 
Helper to build simple helmChartReconcileFunc with result and error. + buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcileFunc { + return func(_ context.Context, _ *patch.SerialPatcher, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmChartReconcileFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, 
"reconciliation in progress"), + }, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmChartReconcileFunc{ + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: 
sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmChartStatus{ + ObservedGeneration: tt.observedGeneration, + }, + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcile(context.TODO(), sp, obj, tt.reconcileFuncs) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func mockChartBuild(name, version, path string, valuesFiles []string) *chart.Build { + var copyP string + if path != "" { + f, err := os.Open(path) + if err == nil { + defer f.Close() + ff, err := os.CreateTemp("", "chart-mock-*.tgz") + if err == nil { + defer ff.Close() + if _, err = io.Copy(ff, f); err == nil { + copyP = ff.Name() + } + } + } + } + return &chart.Build{ + Name: name, + Version: version, + Path: copyP, + ValuesFiles: valuesFiles, + } +} + +func TestHelmChartReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: 
"positive conditions only", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, "ChartPackageError", "some error") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartPackageError", "some error"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: 
[]metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.HelmChart{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmChartKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmchart", + Namespace: "foo", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithObjects(obj). + WithStatusSubresource(&sourcev1.HelmChart{}) + + c := clientBuilder.Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmChartReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_notify(t *testing.T) { + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.HelmChart) + newObjBeforeFunc func(obj *sourcev1.HelmChart) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = 
&meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.HelmChart{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &HelmChartReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + build := &chart.Build{ + Name: "foo", + Version: "1.0.0", + Path: "some/path", + Packaged: true, + } + reconciler.notify(ctx, oldObj, newObj, build, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + type secretOptions struct { + username string + password string + } + + tests := []struct { + name string + url string + registryOpts registryOptions + secretOpts secretOptions + secret *corev1.Secret + 
certSecret *corev1.Secret + insecure bool + provider string + providerImg string + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without basic auth", + want: sreconcile.ResultSuccess, + insecure: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTP with basic auth secret", + want: sreconcile.ResultSuccess, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTP registry - basic auth with invalid secret", + want: sreconcile.ResultEmpty, + wantErr: true, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to login to OCI registry"), + }, + }, + { + name: 
"with contextual login provider", + wantErr: true, + insecure: true, + provider: "aws", + providerImg: "oci://123456789000.dkr.ecr.us-east-2.amazonaws.com/test", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to get credential from"), + }, + }, + { + name: "with contextual login provider and secretRef", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + provider: "azure", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTPS With invalid CA cert", + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + withClientCertAuth: true, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid caFile"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: failed to parse CA certificate"), + }, + }, + { + name: "HTTPS With CA cert only", + want: 
sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTPS With CA cert and client cert auth", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + withClientCertAuth: true, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmChart{}) + + workspaceDir := t.TempDir() + + server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "testdata/certs/client.pem", "testdata/certs/client-key.pem", "testdata/certs/ca.pem") + g.Expect(err).ToNot(HaveOccurred()) + + repo := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Type: sourcev1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Insecure: tt.insecure, + }, + } + + if tt.provider != "" { + repo.Spec.Provider = tt.provider + } + // If a provider specific image is provided, overwrite existing URL + // set earlier. It'll fail, but it's necessary to set them because + // the login check expects the URLs to be of certain pattern. 
+ if tt.providerImg != "" { + repo.Spec.URL = tt.providerImg + } + + if tt.secretOpts.username != "" && tt.secretOpts.password != "" { + tt.secret.Data[".dockerconfigjson"] = []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`, + server.registryHost, tt.secretOpts.username, tt.secretOpts.password)) + } + + if tt.secret != nil { + repo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.secret.Name, + } + clientBuilder.WithObjects(tt.secret) + } + + if tt.certSecret != nil { + repo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + clientBuilder.WithObjects(tt.certSecret) + } + + clientBuilder.WithObjects(repo) + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmChartSpec{ + Chart: metadata.Name, + Version: metadata.Version, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repo.Name, + }, + Interval: metav1.Duration{Duration: interval}, + }, + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + var b chart.Build + defer func() { + if _, err := os.Stat(b.Path); !os.IsNotExist(err) { + err := os.Remove(b.Path) + g.Expect(err).NotTo(HaveOccurred()) + } + }() + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := 
r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { + tests := []struct { + name string + version string + want sreconcile.Result + wantErr bool + beforeFunc func(obj *sourcev1.HelmChart) + assertConditions []metav1.Condition + revision string + }{ + { + name: "signed image with no identity matching specified should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with correct subject and issuer should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, 
meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with incorrect and correct identity matchers should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with incorrect subject and issuer should not pass verification", + version: "6.5.1", + wantErr: true, + want: sreconcile.ResultEmpty, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no matching signatures: none of the expected identities matched what was in the certificate"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no matching signatures"), + }, + revision: 
"6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "unsigned image should not pass verification", + version: "6.1.0", + wantErr: true, + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + revision: "6.1.0@sha256:642383f56ccb529e3f658d40312d01b58d9bc6caeef653da43e58d1afe88982a", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/charts", + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + }, + } + clientBuilder.WithObjects(repository) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: testStorage, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + Version: tt.version, + Chart: "podinfo", + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + }, + } + chartUrl := fmt.Sprintf("%s/%s:%s", repository.Spec.URL, obj.Spec.Chart, obj.Spec.Version) + + assertConditions := tt.assertConditions + for k := 
range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.Chart) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.Version) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + var b chart.Build + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + 
g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + sg, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}}, + TrustStores: []string{"ca:valid-trust-store"}, + TrustedIdentities: []string{"*"}, + }, + }, + } + + tests := []struct { + name string + shouldSign bool + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + wantErrMsg string + addMultipleCerts bool + provideNoCert bool + provideNoPolicy bool + assertConditions []metav1.Condition + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "unsigned charts should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "chart verification error: failed to verify : no signature", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signature"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signature"), + }, + }, + { + name: "signed charts 
should pass verification", + shouldSign: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "multiple certs should still pass verification", + addMultipleCerts: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = 
metadata.Version + obj.Spec.Verify = nil + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled '' chart with version ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "no cert provided should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + wantErr: true, + provideNoCert: true, + // no namespace but the namespace name should appear before the /notation-config + wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + }, + }, + { + name: "empty string should fail verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = 
metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + provideNoPolicy: true, + wantErr: true, + wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + + policy, err := json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + + data := map[string][]byte{} + + if tt.addMultipleCerts { + data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw + data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw + data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw + } + + if !tt.provideNoCert { + data["notation.crt"] = certTuple.Cert.Raw + } + + if !tt.provideNoPolicy { + data["trustpolicy.json"] = policy + 
} + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "notation-config", + }, + Data: data, + } + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-trust-store", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + clientBuilder.WithObjects(repository, secret, caSecret) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + + chartUrl := fmt.Sprintf("oci://%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.shouldSign { + artifact := fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + remoteRepo, err := oras.NewRepository(artifact) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo.PlainHTTP = true + + repo := nr.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifact, + } + + _, err = notation.Sign(ctx, sg, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + } + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + assertConditions[k].Message = 
strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", chartUrl) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + pf := func(b 
bool) ([]byte, error) { + return []byte("cosign-password"), nil + } + + keys, err := cosign.GenerateKeyPair(pf) + g.Expect(err).ToNot(HaveOccurred()) + + err = os.WriteFile(path.Join(tmpDir, "cosign.key"), keys.PrivateBytes, 0600) + g.Expect(err).ToNot(HaveOccurred()) + + defer func() { + err := os.Remove(path.Join(tmpDir, "cosign.key")) + g.Expect(err).ToNot(HaveOccurred()) + }() + + tests := []struct { + name string + shouldSign bool + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + wantErrMsg string + assertConditions []metav1.Condition + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "unsigned charts should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "chart verification error: failed to verify : no signatures found", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + }, + { + name: "unsigned charts should not pass keyless verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + }, + { + name: "signed charts should pass verification", + shouldSign: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = nil + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled '' chart with version ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + 
g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cosign-key", + }, + Data: map[string][]byte{ + "cosign.pub": keys.PublicBytes, + }} + + clientBuilder.WithObjects(repository, secret) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + + chartUrl := fmt.Sprintf("oci://%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.shouldSign { + ko := coptions.KeyOpts{ + KeyRef: path.Join(tmpDir, "cosign.key"), + PassFunc: pf, + } + + ro := &coptions.RootOptions{ + Timeout: timeout, + } + + err = sign.SignCmd(ro, ko, coptions.SignOptions{ + Upload: true, + SkipConfirmation: true, + TlogUpload: false, + Registry: coptions.RegistryOptions{Keychain: oci.Anonymous{}, AllowHTTPRegistry: true}, + }, + []string{fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version)}) + g.Expect(err).ToNot(HaveOccurred()) + } 
+ + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", chartUrl) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +// extractChartMeta is used to extract a chart metadata from a byte array +func extractChartMeta(chartData []byte) (*hchart.Metadata, error) { + ch, err := loader.LoadArchive(bytes.NewReader(chartData)) + if err != nil { + return nil, err + } + return ch.Metadata, nil +} + +func loadTestChartToOCI(chartData []byte, server *registryClientTestServer, certFile, keyFile, cafile string) (*hchart.Metadata, error) { + // Login to the registry + err := server.registryClient.Login(server.registryHost, + helmreg.LoginOptBasicAuth(testRegistryUsername, testRegistryPassword), + helmreg.LoginOptTLSClientConfig(certFile, keyFile, cafile)) + if err != nil { + return nil, fmt.Errorf("failed to login to OCI registry: %w", err) + } + metadata, err := extractChartMeta(chartData) + if err != nil { + return nil, 
fmt.Errorf("failed to extract chart metadata: %w", err) + } + + // Upload the test chart + ref := fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + _, err = server.registryClient.Push(chartData, ref) + if err != nil { + return nil, fmt.Errorf("failed to push chart: %w", err) + } + + return metadata, nil +} diff --git a/controllers/helmrepository_controller.go b/internal/controller/helmrepository_controller.go similarity index 55% rename from controllers/helmrepository_controller.go rename to internal/controller/helmrepository_controller.go index 11fdf1af7..06c4494cf 100644 --- a/controllers/helmrepository_controller.go +++ b/internal/controller/helmrepository_controller.go @@ -14,38 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( + "bytes" "context" - "crypto/tls" "errors" "fmt" "net/url" + "strings" "time" "github.com/docker/go-units" + "github.com/opencontainers/go-digest" helmgetter "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/conditions" 
helper "github.com/fluxcd/pkg/runtime/controller" - "github.com/fluxcd/pkg/runtime/events" + "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/cache" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/getter" @@ -56,7 +62,7 @@ import ( ) // helmRepositoryReadyCondition contains the information required to summarize a -// v1beta2.HelmRepository Ready Condition. +// v1.HelmRepository Ready Condition. var helmRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -97,65 +103,57 @@ var helmRepositoryFailConditions = []string{ // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// HelmRepositoryReconciler reconciles a v1beta2.HelmRepository object. +// HelmRepositoryReconciler reconciles a v1.HelmRepository object. type HelmRepositoryReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics Getters helmgetter.Providers - Storage *Storage + Storage *storage.Storage ControllerName string Cache *cache.Cache TTL time.Duration *cache.CacheRecorder + + patchOptions []patch.Option } type HelmRepositoryReconcilerOptions struct { - MaxConcurrentReconciles int - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // helmRepositoryReconcileFunc is the function type for all the -// v1beta2.HelmRepository (sub)reconcile functions. 
The type implementations +// v1.HelmRepository (sub)reconcile functions. The type implementations // are grouped and executed serially to perform the complete reconcile of the // object. -type helmRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) +type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) } func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error { + r.patchOptions = getPatchOptions(helmRepositoryReadyCondition.Owned, r.ControllerName) + return ctrl.NewControllerManagedBy(mgr). For(&sourcev1.HelmRepository{}). WithEventFilter( predicate.And( - predicate.Or( - intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeDefault}, - intpredicates.HelmRepositoryTypePredicate{RepositoryType: ""}, - ), + intpredicates.HelmRepositoryOCIMigrationPredicate{}, predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), ), ). WithOptions(controller.Options{ - MaxConcurrentReconciles: opts.MaxConcurrentReconciles, - RateLimiter: opts.RateLimiter, - RecoverPanic: true, + RateLimiter: opts.RateLimiter, }). Complete(r) } func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() - log := ctrl.LoggerFrom(ctx). - // Sets a reconcile ID to correlate logs from all suboperations. - WithValues("reconcileID", uuid.NewUUID()) - - // logger will be associated to the new context that is - // returned from ctrl.LoggerInto. 
- ctx = ctrl.LoggerInto(ctx, log) + log := ctrl.LoggerFrom(ctx) // Fetch the HelmRepository obj := &sourcev1.HelmRepository{} @@ -163,19 +161,12 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, client.IgnoreNotFound(err) } - // Record suspended status metric - r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - - // Return early if the object is suspended - if obj.Spec.Suspend { - log.Info("reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - // Initialize the patch helper with the current version of the object. - patchHelper, err := patch.NewHelper(obj, r.Client) - if err != nil { - return ctrl.Result{}, err + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // If it's of type OCI, migrate the object to static. + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { + return r.migrationToStatic(ctx, serialPatcher, obj) } // recResult stores the abstracted reconcile result. @@ -184,38 +175,47 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Always attempt to patch the object after each reconciliation. // NOTE: The final runtime result and error are set in this block. 
defer func() { - summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) summarizeOpts := []summarize.Option{ summarize.WithConditions(helmRepositoryReadyCondition), summarize.WithReconcileResult(recResult), summarize.WithReconcileError(retErr), summarize.WithIgnoreNotFound(), summarize.WithProcessors( - summarize.RecordContextualError, + summarize.ErrorActionHandler, summarize.RecordReconcileReq, ), - summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), summarize.WithPatchFieldOwner(r.ControllerName), } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record readiness and duration metrics - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + // Add finalizer first if not exist to avoid the race condition - // between init and delete + // between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) recResult = sreconcile.ResultRequeue return } - // Examine if the object is under deletion - // or if a type change has happened - if !obj.ObjectMeta.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) { - recResult, retErr = r.reconcileDelete(ctx, obj) + // Return if the object is suspended. 
+ if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil return } @@ -225,29 +225,46 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque r.reconcileSource, r.reconcileArtifact, } - recResult, retErr = r.reconcile(ctx, obj, reconcilers) + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) return } // reconcile iterates through the helmRepositoryReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. -func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() - // Mark as reconciling if generation differs. - if obj.Generation != obj.Status.ObservedGeneration { - conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var reconcileAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + reconcileAtVal = v + } + + // Persist reconciling if generation differs or reconciliation is requested. 
+ switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } } var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact // Run the sub-reconcilers and build the result of reconciliation. var res sreconcile.Result var resErr error for _, rec := range reconcilers { - recResult, err := rec(ctx, obj, &artifact, &chartRepo) + recResult, err := rec(ctx, sp, obj, &artifact, &chartRepo) // Exit immediately on ResultRequeue. if recResult == sreconcile.ResultRequeue { return sreconcile.ResultRequeue, nil @@ -263,19 +280,19 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1. res = sreconcile.LowestRequeuingResult(res, recResult) } - r.notify(ctx, oldObj, obj, chartRepo, res, resErr) + r.notify(ctx, oldObj, obj, &chartRepo, res, resErr) return res, resErr } // notify emits notification related to the reconciliation. -func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo repository.ChartRepository, res sreconcile.Result, resErr error) { +func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. 
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { annotations := map[string]string{ - sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision, - sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, } humanReadableSize := "unknown size" @@ -283,15 +300,10 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size))) } - var oldChecksum string - if oldObj.GetArtifact() != nil { - oldChecksum = oldObj.GetArtifact().Checksum - } - message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL) // Notify on new artifact and failure recovery. - if oldChecksum != newObj.GetArtifact().Checksum { + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, "NewArtifact", message) ctrl.LoggerFrom(ctx).Info(message) @@ -299,8 +311,8 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s if sreconcile.FailureRecovery(oldObj, newObj, helmRepositoryFailConditions) { r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, meta.SucceededReason, message) + ctrl.LoggerFrom(ctx).Info(message) } - ctrl.LoggerFrom(ctx).Info(message) } } } @@ -317,22 +329,50 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s // condition is added. // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. 
-func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.HelmRepository, _ *meta.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) - // Determine if the advertised artifact is still in storage - if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { - obj.Status.Artifact = nil - obj.Status.URL = "" - // Remove the condition as the artifact doesn't exist. - conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } } // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, 
meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } return sreconcile.ResultSuccess, nil } @@ -345,136 +385,138 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *so } // reconcileSource attempts to fetch the Helm repository index using the -// specified configuration on the v1beta2.HelmRepository object. +// specified configuration on the v1.HelmRepository object. // -// When the fetch fails, it records v1beta2.FetchFailedCondition=True and +// When the fetch fails, it records v1.FetchFailedCondition=True and // returns early. // If successful and the index is valid, any previous -// v1beta2.FetchFailedCondition is removed, and the repository.ChartRepository +// v1.FetchFailedCondition is removed, and the repository.ChartRepository // pointer is set to the newly fetched index. 
-func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { - var tlsConfig *tls.Config - - // Configure Helm client to access repository - clientOpts := []helmgetter.Option{ - helmgetter.WithTimeout(obj.Spec.Timeout.Duration), - helmgetter.WithURL(obj.Spec.URL), - helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), - } - - // Configure any authentication related options - if obj.Spec.SecretRef != nil { - // Attempt to retrieve secret - name := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.Spec.SecretRef.Name, - } - var secret corev1.Secret - if err := r.Client.Get(ctx, name, &secret); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - return sreconcile.ResultEmpty, e - } +func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Ensure it's not an OCI URL. API validation ensures that only + // http/https/oci scheme are allowed. 
+ if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) { + err := fmt.Errorf("'oci' URL scheme cannot be used with 'default' HelmRepository type") + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } - // Construct actual options - opts, err := getter.ClientOptionsFromSecret(secret) - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Return err as the content of the secret may change. - return sreconcile.ResultEmpty, e - } - clientOpts = append(clientOpts, opts...) + normalizedURL, err := repository.NormalizeURL(obj.Spec.URL) + if err != nil { + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } - tlsConfig, err = getter.TLSClientConfigFromSecret(secret, obj.Spec.URL) - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create TLS client config with secret data: %w", err), - Reason: sourcev1.AuthenticationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) - // Requeue as content of secret might change + clientOpts, _, err := getter.GetClientOpts(ctx, r.Client, obj, normalizedURL) + if err != nil { + if errors.Is(err, getter.ErrDeprecatedTLSConfig) { + ctrl.LoggerFrom(ctx). 
+ Info("warning: specifying TLS authentication data via `.spec.secretRef` is deprecated, please use `.spec.certSecretRef` instead") + } else { + e := serror.NewGeneric( + err, + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } // Construct Helm chart repository with options and download index - newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, tlsConfig, clientOpts) + newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts.TlsConfig, clientOpts.GetterOpts...) if err != nil { switch err.(type) { case *url.Error: - e := &serror.Stalling{ - Err: fmt.Errorf("invalid Helm repository URL: %w", err), - Reason: sourcev1.URLInvalidReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e default: - e := &serror.Stalling{ - Err: fmt.Errorf("failed to construct Helm client: %w", err), - Reason: meta.FailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewStalling( + fmt.Errorf("failed to construct Helm client: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } // Fetch the repository index from remote. 
- checksum, err := newChartRepo.CacheIndex() - if err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to fetch Helm repository index: %w", err), - Reason: meta.FailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + if err := newChartRepo.CacheIndex(); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to fetch Helm repository index: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) // Coin flip on transient or persistent error, return error and hope for the best return sreconcile.ResultEmpty, e } *chartRepo = *newChartRepo - // Short-circuit based on the fetched index being an exact match to the - // stored Artifact. This prevents having to unmarshal the YAML to calculate - // the (stable) revision, which is a memory expensive operation. - if obj.GetArtifact().HasChecksum(checksum) { - *artifact = *obj.GetArtifact() - conditions.Delete(obj, sourcev1.FetchFailedCondition) - return sreconcile.ResultSuccess, nil + // Early comparison to current Artifact. + if curArtifact := obj.GetArtifact(); curArtifact != nil { + curRev := digest.Digest(curArtifact.Revision) + if curRev.Validate() == nil { + // Short-circuit based on the fetched index being an exact match to the + // stored Artifact. + if newRev := chartRepo.Digest(curRev.Algorithm()); newRev.Validate() == nil && (newRev == curRev) { + *artifact = *curArtifact + conditions.Delete(obj, sourcev1.FetchFailedCondition) + return sreconcile.ResultSuccess, nil + } + } } - // Load the cached repository index to ensure it passes validation. This - // also populates chartRepo.Checksum. 
- if err := chartRepo.LoadFromCache(); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to load Helm repository from cache: %w", err), - Reason: sourcev1.IndexationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + // Load the cached repository index to ensure it passes validation. + if err := chartRepo.LoadFromPath(); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to load Helm repository from index YAML: %w", err), + sourcev1.IndexationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Delete any stale failure observation + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Calculate revision. + revision := chartRepo.Digest(intdigest.Canonical) + if revision.Validate() != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to calculate revision: %w", err), + sourcev1.IndexationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Mark observations about the revision on the object. - if !obj.GetArtifact().HasRevision(chartRepo.Checksum) { - message := fmt.Sprintf("new index revision '%s'", checksum) - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) - conditions.MarkReconciling(obj, "NewRevision", message) + message := fmt.Sprintf("new index revision '%s'", revision) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) } // Create potential new artifact. - // Note: Since this is a potential artifact, artifact.Checksum is empty at - // this stage. 
It's populated when the artifact is written in storage. *artifact = r.Storage.NewArtifactFor(obj.Kind, obj.ObjectMeta.GetObjectMeta(), - chartRepo.Checksum, - fmt.Sprintf("index-%s.yaml", checksum)) - - // Delete any stale failure observation - conditions.Delete(obj, sourcev1.FetchFailedCondition) + revision.String(), + fmt.Sprintf("index-%s.yaml", revision.Encoded()), + ) return sreconcile.ResultSuccess, nil } @@ -483,95 +525,97 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sou // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Set the ArtifactInStorageCondition if there's no drift. 
defer func() { if obj.GetArtifact().HasRevision(artifact.Revision) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, - "stored artifact for revision '%s'", artifact.Revision) + "stored artifact: revision '%s'", artifact.Revision) } - - chartRepo.Unload() - - if err := chartRepo.RemoveCache(); err != nil { + if err := chartRepo.Clear(); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file") } }() - if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasChecksum(artifact.Checksum) { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) + if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasDigest(artifact.Digest) { + // Extend TTL of the Index in the cache (if present). + if r.Cache != nil { + r.Cache.SetExpiration(artifact.Path, r.TTL) + } + + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) return sreconcile.ResultSuccess, nil } // Create artifact dir if err := r.Storage.MkdirAll(*artifact); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("failed to create artifact directory: %w", err), - Reason: sourcev1.DirCreationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric( + fmt.Errorf("failed to create artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Acquire lock. 
unlock, err := r.Storage.Lock(*artifact) if err != nil { - return sreconcile.ResultEmpty, &serror.Event{ - Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), - Reason: meta.FailedReason, - } + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + meta.FailedReason, + ) } defer unlock() - // Save artifact to storage. - if err = r.Storage.CopyFromPath(artifact, chartRepo.CachePath); err != nil { - e := &serror.Event{ - Err: fmt.Errorf("unable to save artifact to storage: %w", err), - Reason: sourcev1.ArchiveOperationFailedReason, - } - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + // Save artifact to storage in JSON format. + b, err := chartRepo.ToJSON() + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to get JSON index from chart repo: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + if err = r.Storage.Copy(artifact, bytes.NewBuffer(b)); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to save artifact to storage: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Record it on the object. obj.Status.Artifact = artifact.DeepCopy() + // Cache the index if it was successfully retrieved. + if r.Cache != nil && chartRepo.Index != nil { + // The cache keys have to be safe in multi-tenancy environments, as + // otherwise it could be used as a vector to bypass the repository's + // authentication. Using the Artifact.Path is safe as the path is in + // the format of: ///. 
+ if err := r.Cache.Set(artifact.Path, chartRepo.Index, r.TTL); err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) + } + } + // Update index symlink. indexURL, err := r.Storage.Symlink(*artifact, "index.yaml") if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, "failed to update status URL symlink: %s", err) } if indexURL != "" { obj.Status.URL = indexURL } conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) - - // enable cache if applicable - if r.Cache != nil && chartRepo.IndexCache == nil { - chartRepo.SetMemCache(r.Storage.LocalPath(*artifact), r.Cache, r.TTL, func(event string) { - r.IncCacheEvents(event, obj.GetName(), obj.GetNamespace()) - }) - } - - // Cache the index if it was successfully retrieved - // and the chart was successfully built - if r.Cache != nil && chartRepo.Index != nil { - // The cache key have to be safe in multi-tenancy environments, - // as otherwise it could be used as a vector to bypass the helm repository's authentication. - // Using r.Storage.LocalPath(*repo.GetArtifact() is safe as the path is in the format ///. - err := chartRepo.CacheIndexInMemory() - if err != nil { - r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) - } - } - return sreconcile.ResultSuccess, nil } @@ -590,6 +634,12 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) } + // Delete cache metrics. 
+ if r.CacheRecorder != nil && r.Metrics.IsDelete(obj) { + r.DeleteCacheEvent(cache.CacheEventTypeHit, obj.Name, obj.Namespace) + r.DeleteCacheEvent(cache.CacheEventTypeMiss, obj.Name, obj.Namespace) + } + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -603,12 +653,12 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) } else if deleted != "" { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", "garbage collected artifacts for deleted resource") } // Clean status sub-resource @@ -621,14 +671,14 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sour if obj.GetArtifact() != nil { delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) if err != nil { - return &serror.Event{ - Err: fmt.Errorf("garbage collection of artifacts failed: %w", err), - Reason: "GarbageCollectionFailed", - } + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) } if len(delFiles) > 0 { - r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, 
"GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -650,3 +700,31 @@ func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Ob } r.Eventf(obj, eventType, reason, msg) } + +// migrateToStatic is HelmRepository OCI migration to static object. +func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository) (result ctrl.Result, err error) { + // Skip migration if suspended and not being deleted. + if obj.Spec.Suspend && obj.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + if !intpredicates.HelmRepositoryOCIRequireMigration(obj) { + // Already migrated, nothing to do. + return ctrl.Result{}, nil + } + + // Delete any artifact. + _, err = r.reconcileDelete(ctx, obj) + if err != nil { + return ctrl.Result{}, err + } + // Delete finalizer and reset the status. + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + obj.Status = sourcev1.HelmRepositoryStatus{} + + if err := sp.Patch(ctx, obj); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} diff --git a/internal/controller/helmrepository_controller_test.go b/internal/controller/helmrepository_controller_test.go new file mode 100644 index 000000000..d76c58a42 --- /dev/null +++ b/internal/controller/helmrepository_controller_test.go @@ -0,0 +1,1919 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/opencontainers/go-digest" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/repo" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/secrets" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/cache" + "github.com/fluxcd/source-controller/internal/helm/repository" + intpredicates "github.com/fluxcd/source-controller/internal/predicates" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +func TestHelmRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "helmrepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + 
g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + helmrepo := &sourcev1.HelmRepository{} + helmrepo.Name = "test-helmrepo" + helmrepo.Namespace = namespaceName + helmrepo.Spec = sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "https://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + helmrepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, helmrepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, helmrepo)).NotTo(HaveOccurred()) + + r := &HelmRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(helmrepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + origObj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return 
false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building 
artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := 
storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var chartRepo repository.ChartRepository + var artifact meta.Artifact + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, &artifact, &chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := []struct { + name string + protocol string + server options + url string + secret *corev1.Secret + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + revFunc func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTPS with certSecretRef non-matching CA succeeds via system CA pool", + protocol: "http", + url: "https://stefanprodan.github.io/podinfo", + want: sreconcile.ResultSuccess, + wantErr: false, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + }, + { + name: "HTTPS with certSecretRef makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = 
&meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTPS with secretRef and caFile key makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + 
t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + // Regression test for: https://github.com/fluxcd/source-controller/issues/1218 + name: "HTTPS with docker config secretRef and caFile key makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := 
secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + // Regression test for: https://github.com/fluxcd/source-controller/issues/1218 + name: "HTTP with docker config secretRef sets Reconciling=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTPS with invalid CAFile in certSecretRef makes FetchFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to construct Helm client's TLS config: failed to parse CA certificate"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Invalid URL makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Unsupported scheme makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Missing secret returns FetchFailed=True and returns error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Malformed secret returns FetchFailed=True and returns error", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "malformed-basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secret 'default/malformed-basic-auth': malformed basic auth - has 'username' but missing 'password'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Stored index with same revision", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "foo", "bar") + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + + t.Expect(&artifact).To(BeEquivalentTo(obj.Status.Artifact)) + }, + want: sreconcile.ResultSuccess, + }, + { + name: "Stored index with different revision", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, "foo", "bar") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + + t.Expect(artifact.Path).To(Not(BeEmpty())) + t.Expect(artifact.Revision).ToNot(Equal(obj.Status.Artifact.Revision)) + }, + want: sreconcile.ResultSuccess, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + }, + } + + for _, tt := range tests { + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + + 
g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) + } + + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + server.Start() + defer server.Stop() + obj.Spec.URL = server.URL() + if tt.url != "" { + obj.Spec.URL = tt.url + } + case "https": + g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + defer server.Stop() + obj.Spec.URL = server.URL() + if tt.url != "" { + obj.Spec.URL = tt.url + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmRepository{}) + + if secret != nil { + clientBuilder.WithObjects(secret.DeepCopy()) + } + + var rev digest.Digest + if tt.revFunc != nil { + rev = tt.revFunc(g, server, secret) + } + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + Getters: testGetters, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj) + } + + // Special handling for tests that need to set revision after calculation + if tt.name == "Stored index with same revision" && rev != "" { + obj.Status.Artifact = &meta.Artifact{ + Revision: rev.String(), + } + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var chartRepo repository.ChartRepository + var artifact meta.Artifact + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, &artifact, &chartRepo) + defer os.Remove(chartRepo.Path) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, artifact, &chartRepo) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + cache *cache.Cache + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes ArtifactInStorage=True and artifact is stored as JSON", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + b, err := os.ReadFile(localPath) + t.Expect(err).To(Not(HaveOccurred())) + t.Expect(json.Valid(b)).To(BeTrue()) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Archiving (loaded) artifact to storage adds to cache", + cache: cache.New(10, time.Minute), + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + index.Index = &repo.IndexFile{ + APIVersion: "v1", + Generated: time.Now(), + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { + i, ok := cache.Get(obj.GetArtifact().Path) + t.Expect(ok).To(BeTrue()) + t.Expect(i).To(BeAssignableToTypeOf(&repo.IndexFile{})) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, 
meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact: revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + Cache: tt.cache, + TTL: 1 * time.Minute, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Timeout: &metav1.Duration{Duration: timeout}, + URL: "https://example.com/index.yaml", + }, + } + + chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) + g.Expect(err).ToNot(HaveOccurred()) + chartRepo.Index = &repo.IndexFile{} + + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + // Digest of the index file calculated by the ChartRepository. + artifact.Digest = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, chartRepo) + } + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(context.TODO(), sp, obj, &artifact, chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. Check artifacts only on successful + // reconcile. 
+ if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tt.cache) + } + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmRepositoryReconcileFunc with result and error. + buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc { + return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmRepositoryReconcileFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + }, + }, + { + name: "failed reconciliation", 
+ reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmRepositoryReconcileFunc{ + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). 
+ Build(), + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmRepositoryStatus{ + ObservedGeneration: tt.observedGeneration, + }, + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + ctx := context.TODO() + sp := patch.NewSerialPatcher(obj, r.Client) + + gotRes, gotErr := r.reconcile(ctx, sp, obj, tt.reconcileFuncs) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + g.Expect(gotRes).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: "positive conditions only", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: false, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: 
[]metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepo", + Namespace: "foo", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithObjects(obj). 
+ WithStatusSubresource(&sourcev1.HelmRepository{}) + + c := clientBuilder.Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmRepositoryReadyCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmRepositoryReconciler_notify(t *testing.T) { + var aSize int64 = 30000 + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.HelmRepository) + newObjBeforeFunc func(obj *sourcev1.HelmRepository) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact with nil size", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} + }, + wantEvent: "Normal NewArtifact stored fetched index of unknown size", + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + }, + wantEvent: "Normal NewArtifact stored fetched index of size", + }, + { + name: "recovery from 
failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored fetched index of size", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored fetched index of size", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := 
NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.HelmRepository{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &HelmRepositoryReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + chartRepo := repository.ChartRepository{ + URL: "some-address", + } + reconciler.notify(ctx, oldObj, newObj, &chartRepo, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != 
nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Switch to an OCI helm repository type + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost) + + oldGen := obj.GetGeneration() + g.Expect(testEnv.Update(ctx, obj)).To(Succeed()) + newGen := oldGen + 1 + + // Wait for HelmRepository to become static for new generation. 
+ g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return newGen == obj.Generation && + !intpredicates.HelmRepositoryOCIRequireMigration(obj) + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + 
// Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Change spec Interval to validate spec update + obj.Spec.Interval = metav1.Duration{Duration: interval + time.Second} + oldGen := obj.GetGeneration() + g.Expect(testEnv.Update(ctx, obj)).To(Succeed()) + newGen := oldGen + 1 + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + newGen == readyCondition.ObservedGeneration && + newGen == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. 
+ condns = &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker = conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) { + g := NewWithT(t) + testCache.Clear() + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChartWithVersion("testdata/charts/helmchart", "0.1.0")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + ns, err := testEnv.CreateNamespace(ctx, "helmrepository") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + helmRepo := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, helmRepo)).To(Succeed()) + + key := client.ObjectKey{Name: helmRepo.Name, Namespace: helmRepo.Namespace} + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return false + } + return len(helmRepo.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return false + } + if !conditions.IsReady(helmRepo) || helmRepo.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(helmRepo, meta.ReadyCondition) + return helmRepo.Generation == 
readyCondition.ObservedGeneration && + helmRepo.Generation == helmRepo.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + err = testEnv.Get(ctx, key, helmRepo) + g.Expect(err).ToNot(HaveOccurred()) + _, cacheHit := testCache.Get(helmRepo.GetArtifact().Path) + g.Expect(cacheHit).To(BeTrue()) + + g.Expect(testEnv.Delete(ctx, helmRepo)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_ociMigration(t *testing.T) { + g := NewWithT(t) + + testns, err := testEnv.CreateNamespace(ctx, "hr-oci-migration-test") + g.Expect(err).ToNot(HaveOccurred()) + + t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, testns)).ToNot(HaveOccurred()) + }) + + hr := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("hr-%s", randStringRunes(5)), + Namespace: testns.Name, + }, + } + hrKey := client.ObjectKeyFromObject(hr) + + // Migrates newly created object with finalizer. + + hr.ObjectMeta.Finalizers = append(hr.ObjectMeta.Finalizers, "foo.bar", sourcev1.SourceFinalizer) + hr.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + URL: "oci://foo/bar", + Interval: metav1.Duration{Duration: interval}, + } + g.Expect(testEnv.Create(ctx, hr)).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + _ = testEnv.Get(ctx, hrKey, hr) + return !intpredicates.HelmRepositoryOCIRequireMigration(hr) + }, timeout, time.Second).Should(BeTrue()) + + // Migrates updated object with finalizer. 
+ + patchHelper, err := patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + hr.ObjectMeta.Finalizers = append(hr.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) + hr.Spec.URL = "oci://foo/baz" + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + _ = testEnv.Get(ctx, hrKey, hr) + return !intpredicates.HelmRepositoryOCIRequireMigration(hr) + }, timeout, time.Second).Should(BeTrue()) + + // Migrates deleted object with finalizer. + + patchHelper, err = patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + + // Suspend the object to prevent finalizer from getting removed. + // Ensure only flux finalizer is set to allow the object to be garbage + // collected at the end. + // NOTE: Suspending and updating finalizers are done separately here as + // doing them in a single patch results in flaky test where the finalizer + // update doesn't get registered with the kube-apiserver, resulting in + // timeout waiting for finalizer to appear on the object below. + hr.Spec.Suspend = true + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + _ = k8sClient.Get(ctx, hrKey, hr) + return hr.Spec.Suspend == true + }, timeout).Should(BeTrue()) + + patchHelper, err = patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + + // Add finalizer and verify that finalizer exists on the object using a live + // client. + hr.ObjectMeta.Finalizers = []string{sourcev1.SourceFinalizer} + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + _ = k8sClient.Get(ctx, hrKey, hr) + return controllerutil.ContainsFinalizer(hr, sourcev1.SourceFinalizer) + }, timeout).Should(BeTrue()) + + // Delete the object and verify. 
+ g.Expect(testEnv.Delete(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, hrKey, hr); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} diff --git a/internal/controller/ocirepository_controller.go b/internal/controller/ocirepository_controller.go new file mode 100644 index 000000000..a91c8a51b --- /dev/null +++ b/internal/controller/ocirepository_controller.go @@ -0,0 +1,1383 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + cryptotls "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/google/go-containerregistry/pkg/name" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/sigstore/cosign/v2/pkg/cosign" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/oci" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/fluxcd/pkg/sourceignore" + "github.com/fluxcd/pkg/tar" + "github.com/fluxcd/pkg/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" +) + +// ociRepositoryReadyCondition contains the information required to summarize a +// v1.OCIRepository Ready Condition. +var ociRepositoryReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// ociRepositoryFailConditions contains the conditions that represent a failure. 
+var ociRepositoryFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.StorageOperationFailedCondition, +} + +type filterFunc func(tags []string) ([]string, error) + +type invalidOCIURLError struct { + err error +} + +func (e invalidOCIURLError) Error() string { + return e.err.Error() +} + +// ociRepositoryReconcileFunc is the function type for all the v1.OCIRepository +// (sub)reconcile functions. The type implementations are grouped and +// executed serially to perform the complete reconcile of the object. +type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) + +// OCIRepositoryReconciler reconciles a v1.OCIRepository object +type OCIRepositoryReconciler struct { + client.Client + helper.Metrics + kuberecorder.EventRecorder + + Storage *storage.Storage + ControllerName string + TokenCache *cache.TokenCache + requeueDependency time.Duration + + patchOptions []patch.Option +} + +type OCIRepositoryReconcilerOptions struct { + DependencyRequeueInterval time.Duration + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OCIRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(mgr, OCIRepositoryReconcilerOptions{}) +} + +func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts OCIRepositoryReconcilerOptions) error { + r.patchOptions = getPatchOptions(ociRepositoryReadyCondition.Owned, r.ControllerName) + + r.requeueDependency = opts.DependencyRequeueInterval + + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.OCIRepository{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), + )). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create + +func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + start := time.Now() + log := ctrl.LoggerFrom(ctx) + + // Fetch the OCIRepository + obj := &sourcev1.OCIRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the patch helper with the current version of the object. + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. 
+ defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(ociRepositoryReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.ErrorActionHandler, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record duration metrics. + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition between init + // and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil + return + } + + // Reconcile actual object + reconcilers := []ociRepositoryReconcileFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) + return +} + +// reconcile iterates through the ociRepositoryReconcileFunc tasks for the +// object. It returns early on the first call that returns +// reconcile.ResultRequeue, or produces an error. 
+func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { + oldObj := obj.DeepCopy() + + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var reconcileAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + reconcileAtVal = v + } + + // Persist reconciling status if generation differs or reconciliation is + // requested. + switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + + // Create temp working dir + tmpDir, err := util.TempDirForObj("", obj) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create temporary working directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + defer func() { + if err = os.RemoveAll(tmpDir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory") + } + }() + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + + var ( + res sreconcile.Result + resErr error + metadata = meta.Artifact{} + ) + + // Run the sub-reconcilers and build the result of reconciliation. 
+ for _, rec := range reconcilers { + recResult, err := rec(ctx, sp, obj, &metadata, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + + r.notify(ctx, oldObj, obj, res, resErr) + + return res, resErr +} + +// reconcileSource fetches the upstream OCI artifact metadata and content. +// If this fails, it records v1.FetchFailedCondition=True on the object and returns early. +func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { + var authenticator authn.Authenticator + + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + + // Remove previously failed source verification status conditions. The + // failing verification should be recalculated. But an existing successful + // verification need not be removed as it indicates verification of previous + // version. 
+ if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } + + // Generate the registry credential keychain either from static credentials or using cloud OIDC + keychain, err := r.keychain(ctx, obj) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get credential: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + var proxyURL *url.URL + if obj.Spec.ProxySecretRef != nil { + var err error + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + }) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get proxy address: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider && ok { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. 
+ if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + return sreconcile.ResultEmpty, serror.NewStalling(err, meta.FeatureGateDisabledReason) + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.OCIRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } + var authErr error + authenticator, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider, opts...) + if authErr != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Generate the transport for remote operations + transport, err := r.transport(ctx, obj, proxyURL) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to generate transport for '%s': %w", obj.Spec.URL, err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + opts := makeRemoteOptions(ctx, transport, keychain, authenticator) + + // Determine which artifact revision to pull + ref, err := r.getArtifactRef(obj, opts) + if err != nil { + if _, ok := err.(invalidOCIURLError); ok { + e := serror.NewStalling( + fmt.Errorf("URL validation failed for '%s': %w", obj.Spec.URL, err), + 
sourcev1.URLInvalidReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + e := serror.NewGeneric( + fmt.Errorf("failed to determine the artifact tag for '%s': %w", obj.Spec.URL, err), + sourcev1.ReadOperationFailedReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Get the upstream revision from the artifact digest + // TODO: getRevision resolves the digest, which may change before image is fetched, so it should probably update ref + revision, err := r.getRevision(ref, opts) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to determine artifact digest: %w", err), + sourcev1.OCIPullFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + metaArtifact := &meta.Artifact{Revision: revision} + metaArtifact.DeepCopyInto(metadata) + + // Mark observations about the revision on the object + defer func() { + if !obj.GetArtifact().HasRevision(revision) { + message := fmt.Sprintf("new revision '%s' for '%s'", revision, ref) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to patch") + return + } + } + }() + + // Verify artifact if: + // - the upstream digest differs from the one in storage (revision drift) + // - the OCIRepository spec has changed (generation drift) + // - the previous reconciliation resulted in a failed artifact verification (retry with exponential backoff) + if obj.Spec.Verify == nil { + // Remove old observations if verification was disabled + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } else if 
!obj.GetArtifact().HasRevision(revision) || + conditions.GetObservedGeneration(obj, sourcev1.SourceVerifiedCondition) != obj.Generation || + conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + + result, err := r.verifySignature(ctx, obj, ref, keychain, authenticator, transport, opts...) + if err != nil { + provider := obj.Spec.Verify.Provider + if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { + provider = fmt.Sprintf("%s keyless", provider) + } + e := serror.NewGeneric( + fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err), + sourcev1.VerificationError, + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + if result == soci.VerificationResultSuccess { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision %s", revision) + } + } + + // Skip pulling if the artifact revision and the source configuration has + // not changed. + if obj.GetArtifact().HasRevision(revision) && !ociContentConfigChanged(obj) { + conditions.Delete(obj, sourcev1.FetchFailedCondition) + return sreconcile.ResultSuccess, nil + } + + // Pull artifact from the remote container registry + img, err := remote.Image(ref, opts...) 
+ if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err), + sourcev1.OCIPullFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Copy the OCI annotations to the internal artifact metadata + manifest, err := img.Manifest() + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to parse artifact manifest: %w", err), + sourcev1.OCILayerOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + metadata.Metadata = manifest.Annotations + + // Extract the compressed content from the selected layer + blob, err := r.selectLayer(obj, img) + if err != nil { + e := serror.NewGeneric(err, sourcev1.OCILayerOperationFailedReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Persist layer content to storage using the specified operation + switch obj.GetLayerOperation() { + case sourcev1.OCILayerExtract: + if err = tar.Untar(blob, dir, tar.WithMaxUntarSize(-1), tar.WithSkipSymlinks()); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to extract layer contents from artifact: %w", err), + sourcev1.OCILayerOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + case sourcev1.OCILayerCopy: + metadata.Path = fmt.Sprintf("%s.tgz", r.digestFromRevision(metadata.Revision)) + file, err := os.Create(filepath.Join(dir, metadata.Path)) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create file to copy layer to: %w", err), + sourcev1.OCILayerOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + defer file.Close() + + _, err = io.Copy(file, blob) + if err != nil { + e := 
serror.NewGeneric( + fmt.Errorf("failed to copy layer from artifact: %w", err), + sourcev1.OCILayerOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + default: + e := serror.NewGeneric( + fmt.Errorf("unsupported layer operation: %s", obj.GetLayerOperation()), + sourcev1.OCILayerOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + conditions.Delete(obj, sourcev1.FetchFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// selectLayer finds the matching layer and returns its compressed contents. +// If no layer selector was provided, we pick the first layer from the OCI artifact. +func (r *OCIRepositoryReconciler) selectLayer(obj *sourcev1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) { + layers, err := image.Layers() + if err != nil { + return nil, fmt.Errorf("failed to parse artifact layers: %w", err) + } + + if len(layers) < 1 { + return nil, fmt.Errorf("no layers found in artifact") + } + + var layer gcrv1.Layer + switch { + case obj.GetLayerMediaType() != "": + var found bool + for i, l := range layers { + md, err := l.MediaType() + if err != nil { + return nil, fmt.Errorf("failed to determine the media type of layer[%v] from artifact: %w", i, err) + } + if string(md) == obj.GetLayerMediaType() { + layer = layers[i] + found = true + break + } + } + if !found { + return nil, fmt.Errorf("failed to find layer with media type '%s' in artifact", obj.GetLayerMediaType()) + } + default: + layer = layers[0] + } + + blob, err := layer.Compressed() + if err != nil { + return nil, fmt.Errorf("failed to extract the first layer from artifact: %w", err) + } + + return blob, nil +} + +// getRevision fetches the upstream digest, returning the revision in the +// format '@'. 
func (r *OCIRepositoryReconciler) getRevision(ref name.Reference, options []remote.Option) (string, error) {
	switch ref := ref.(type) {
	case name.Digest:
		// A pinned digest needs no remote round-trip; validate and return it
		// as-is (no '<tag>@' prefix, since there is no tag).
		digest, err := gcrv1.NewHash(ref.DigestStr())
		if err != nil {
			return "", err
		}
		return digest.String(), nil
	case name.Tag:
		var digest gcrv1.Hash

		// Prefer a HEAD request (cheap, no manifest body). Some registries do
		// not support HEAD for manifests, so fall back to a full GET on error.
		desc, err := remote.Head(ref, options...)
		if err == nil {
			digest = desc.Digest
		} else {
			rdesc, err := remote.Get(ref, options...)
			if err != nil {
				return "", err
			}
			digest = rdesc.Descriptor.Digest
		}
		return fmt.Sprintf("%s@%s", ref.TagStr(), digest.String()), nil
	default:
		return "", fmt.Errorf("unsupported reference type: %T", ref)
	}
}

// digestFromRevision extracts the digest from the revision string.
// For a revision of the form '<tag>@<digest>' (or a bare digest) it returns
// the substring after the last '@'.
func (r *OCIRepositoryReconciler) digestFromRevision(revision string) string {
	parts := strings.Split(revision, "@")
	return parts[len(parts)-1]
}

// verifySignature verifies the authenticity of the given image reference URL.
// It supports two different verification providers: cosign and notation.
// First, it tries to use a key if a Secret with a valid public key is provided.
// If not, when using cosign it falls back to a keyless approach for verification.
// When notation is used, a trust policy is required to verify the image.
// The verification result is returned as a VerificationResult and any error encountered.
+func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.OCIRepository, + ref name.Reference, keychain authn.Keychain, auth authn.Authenticator, + transport *http.Transport, opt ...remote.Option) (soci.VerificationResult, error) { + + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + + provider := obj.Spec.Verify.Provider + switch provider { + case "cosign": + defaultCosignOciOpts := []scosign.Options{ + scosign.WithRemoteOptions(opt...), + } + + // get the public keys from the given secret + if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil { + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err + } + + signatureVerified := soci.VerificationResultFailed + for k, data := range pubSecret.Data { + // search for public keys in the secret + if strings.HasSuffix(k, ".pub") { + verifier, err := scosign.NewCosignVerifier(ctxTimeout, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...) 
+ if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil || result == soci.VerificationResultFailed { + continue + } + + if result == soci.VerificationResultSuccess { + signatureVerified = result + break + } + } + } + + if signatureVerified == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return soci.VerificationResultSuccess, nil + } + + // if no secret is provided, try keyless verification + ctrl.LoggerFrom(ctx).Info("no secret reference is provided, trying to verify the image using keyless method") + + var identities []cosign.Identity + for _, match := range obj.Spec.Verify.MatchOIDCIdentity { + identities = append(identities, cosign.Identity{ + IssuerRegExp: match.Issuer, + SubjectRegExp: match.Subject, + }) + } + defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities)) + + verifier, err := scosign.NewCosignVerifier(ctxTimeout, defaultCosignOciOpts...) 
+ if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil { + return soci.VerificationResultFailed, err + } + + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return soci.VerificationResultSuccess, nil + + case "notation": + // get the public keys from the given secret + secretRef := obj.Spec.Verify.SecretRef + + if secretRef == nil { + return soci.VerificationResultFailed, fmt.Errorf("verification secret cannot be empty: '%s'", ref) + } + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err + } + + data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey] + if !ok { + return soci.VerificationResultFailed, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String()) + } + + var doc trustpolicy.Document + + if err := json.Unmarshal(data, &doc); err != nil { + return soci.VerificationResultFailed, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err) + } + + var certs [][]byte + + for k, data := range pubSecret.Data { + if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") { + certs = append(certs, data) + } + } + + if certs == nil { + return soci.VerificationResultFailed, fmt.Errorf("no certificates found in secret '%s'", verifySecret.String()) + } + + trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx)) + defaultNotationOciOpts := []notation.Options{ + notation.WithTrustPolicy(trustPolicy), + notation.WithRemoteOptions(opt...), + notation.WithAuth(auth), + notation.WithKeychain(keychain), + notation.WithInsecureRegistry(obj.Spec.Insecure), + notation.WithLogger(ctrl.LoggerFrom(ctx)), + 
notation.WithRootCertificates(certs), + notation.WithTransport(transport), + } + + verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...) + if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil { + return result, err + } + + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return result, nil + default: + return soci.VerificationResultFailed, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider) + } +} + +// retrieveSecret retrieves a secret from the specified namespace with the given secret name. +// It returns the retrieved secret and any error encountered during the retrieval process. +func (r *OCIRepositoryReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) { + var pubSecret corev1.Secret + + if err := r.Get(ctx, verifySecret, &pubSecret); err != nil { + return corev1.Secret{}, err + } + return pubSecret, nil +} + +// parseRepository validates and extracts the repository URL. +func (r *OCIRepositoryReconciler) parseRepository(obj *sourcev1.OCIRepository) (name.Repository, error) { + if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) { + return name.Repository{}, fmt.Errorf("URL must be in format 'oci:////'") + } + + url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) + + options := []name.Option{} + if obj.Spec.Insecure { + options = append(options, name.Insecure) + } + repo, err := name.NewRepository(url, options...) 
	if err != nil {
		return name.Repository{}, err
	}

	// Reject URLs that smuggle a tag into the repository path; tags belong in
	// spec.ref, not in spec.url.
	imageName := strings.TrimPrefix(url, repo.RegistryStr())
	if s := strings.Split(imageName, ":"); len(s) > 1 {
		return name.Repository{}, fmt.Errorf("URL must not contain a tag; remove ':%s'", s[1])
	}

	return repo, nil
}

// getArtifactRef determines which tag or revision should be used and returns the OCI artifact FQN.
// Reference fields take precedence in the order: digest, semver, tag; when no
// reference is set the registry default tag is used.
func (r *OCIRepositoryReconciler) getArtifactRef(obj *sourcev1.OCIRepository, options []remote.Option) (name.Reference, error) {
	repo, err := r.parseRepository(obj)
	if err != nil {
		// Wrap so callers can distinguish an invalid URL from transient errors.
		return nil, invalidOCIURLError{err}
	}

	if obj.Spec.Reference != nil {
		if obj.Spec.Reference.Digest != "" {
			return repo.Digest(obj.Spec.Reference.Digest), nil
		}

		if obj.Spec.Reference.SemVer != "" {
			return r.getTagBySemver(repo, obj.Spec.Reference.SemVer, filterTags(obj.Spec.Reference.SemverFilter), options)
		}

		if obj.Spec.Reference.Tag != "" {
			return repo.Tag(obj.Spec.Reference.Tag), nil
		}
	}

	return repo.Tag(name.DefaultTag), nil
}

// getTagBySemver calls the remote container registry, fetches all the tags from the repository,
// and returns the latest tag according to the semver expression.
func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp string, filter filterFunc, options []remote.Option) (name.Reference, error) {
	tags, err := remote.List(repo, options...)
	if err != nil {
		return nil, err
	}

	// Pre-filter tags (e.g. by the user-supplied regex) before semver matching.
	validTags, err := filter(tags)
	if err != nil {
		return nil, err
	}

	constraint, err := semver.NewConstraint(exp)
	if err != nil {
		return nil, fmt.Errorf("semver '%s' parse error: %w", exp, err)
	}

	var matchingVersions []*semver.Version
	for _, t := range validTags {
		// Tags that are not parseable as versions are ignored, not fatal.
		v, err := version.ParseVersion(t)
		if err != nil {
			continue
		}

		if constraint.Check(v) {
			matchingVersions = append(matchingVersions, v)
		}
	}

	if len(matchingVersions) == 0 {
		return nil, fmt.Errorf("no match found for semver: %s", exp)
	}

	// Highest matching version wins; Original() preserves the tag exactly as
	// published (e.g. with a leading 'v').
	sort.Sort(sort.Reverse(semver.Collection(matchingVersions)))
	return repo.Tag(matchingVersions[0].Original()), nil
}

// keychain generates the credential keychain based on the resource
// configuration. If no auth is specified a default keychain with
// anonymous access is returned
func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) {
	var imagePullSecrets []corev1.Secret

	// lookup auth secret
	if obj.Spec.SecretRef != nil {
		var imagePullSecret corev1.Secret
		secretRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.SecretRef.Name}
		err := r.Get(ctx, secretRef, &imagePullSecret)
		if err != nil {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason,
				"auth secret '%s' not found", obj.Spec.SecretRef.Name)
			return nil, fmt.Errorf("failed to get secret '%s': %w", secretRef, err)
		}
		imagePullSecrets = append(imagePullSecrets, imagePullSecret)
	}

	// lookup service account
	if obj.Spec.ServiceAccountName != "" {
		saRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.ServiceAccountName}
		saSecrets, err := secrets.PullSecretsFromServiceAccountRef(ctx, r.Client, saRef)
		if err != nil {
			return nil, err
		}
		imagePullSecrets = append(imagePullSecrets, saSecrets...)
	}

	// if no pullsecrets available return an AnonymousKeychain
	if len(imagePullSecrets) == 0 {
		return soci.Anonymous{}, nil
	}

	return k8schain.NewFromPullSecrets(ctx, imagePullSecrets)
}

// transport clones the default transport from remote and when a certSecretRef is specified,
// the returned transport will include the TLS client and/or CA certificates.
// If the insecure flag is set, the transport will skip the verification of the server's certificate.
// Additionally, if a proxy is specified, transport will use it.
func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository, proxyURL *url.URL) (*http.Transport, error) {
	// Clone so per-object TLS/proxy settings never mutate the shared default.
	transport := remote.DefaultTransport.(*http.Transport).Clone()

	tlsConfig, err := r.getTLSConfig(ctx, obj)
	if err != nil {
		return nil, err
	}
	if tlsConfig != nil {
		transport.TLSClientConfig = tlsConfig
	}

	if proxyURL != nil {
		transport.Proxy = http.ProxyURL(proxyURL)
	}

	return transport, nil
}

// getTLSConfig gets the TLS configuration for the transport based on the
// specified secret reference in the OCIRepository object, or the insecure flag.
func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *sourcev1.OCIRepository) (*cryptotls.Config, error) {
	if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" {
		if obj.Spec.Insecure {
			// NOTE: This is the only place in Flux where InsecureSkipVerify is allowed.
			// This exception is made for OCIRepository to maintain backward compatibility
			// with tools like crane that require insecure connections without certificates.
			// This only applies when no CertSecretRef is provided AND insecure is explicitly set.
			// All other controllers must NOT allow InsecureSkipVerify per our security policy.
			return &cryptotls.Config{
				InsecureSkipVerify: true,
			}, nil
		}
		// No cert secret and not insecure: fall back to the default TLS config.
		return nil, nil
	}

	secretName := types.NamespacedName{
		Namespace: obj.Namespace,
		Name:      obj.Spec.CertSecretRef.Name,
	}
	// NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing
	// extend approach (system CAs + user CA) rather than the default replace approach (user CA only).
	// This ensures source-controller continues to work with both system and user-provided CA certificates.
	var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()}
	return secrets.TLSConfigFromSecretRef(ctx, r.Client, secretName, tlsOpts...)
}

// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
// The garbage collection is executed based on the flag configured settings and
// may remove files that are beyond their TTL or the maximum number of files
// to survive a collection cycle.
// If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime.
func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.OCIRepository, _ *meta.Artifact, _ string) (sreconcile.Result, error) {
	// Garbage collect previous advertised artifact(s) from storage.
	// Best-effort: a GC failure must not block storage reconciliation.
	_ = r.garbageCollect(ctx, obj)

	var artifactMissing bool
	if artifact := obj.GetArtifact(); artifact != nil {
		// Determine if the advertised artifact is still in storage
		if !r.Storage.ArtifactExist(*artifact) {
			artifactMissing = true
		}

		// If the artifact is in storage, verify if the advertised digest still
		// matches the actual artifact
		if !artifactMissing {
			if err := r.Storage.VerifyArtifact(*artifact); err != nil {
				r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())

				// Corrupt artifact: remove it so it gets rebuilt from source.
				if err = r.Storage.Remove(*artifact); err != nil {
					return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
				}

				artifactMissing = true
			}
		}

		// If the artifact is missing, remove it from the object
		if artifactMissing {
			obj.Status.Artifact = nil
			obj.Status.URL = ""
		}
	}

	// Record that we do not have an artifact
	if obj.GetArtifact() == nil {
		msg := "building artifact"
		if artifactMissing {
			msg += ": disappeared from storage"
		}
		rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
		conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
		// Patch immediately so the Progressing status is visible before the
		// (potentially slow) artifact rebuild.
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
		return sreconcile.ResultSuccess, nil
	}

	// Always update URLs to ensure hostname is up-to-date
	r.Storage.SetArtifactURL(obj.GetArtifact())
	obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)

	return sreconcile.ResultSuccess, nil
}

// reconcileArtifact archives a new Artifact to the Storage, if the current
// (Status) data on the object
// does not match the given.
//
// The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) {
	// Create artifact
	artifact := r.Storage.NewArtifactFor(obj.Kind, obj, metadata.Revision,
		fmt.Sprintf("%s.tar.gz", r.digestFromRevision(metadata.Revision)))

	// Set the ArtifactInStorageCondition if there's no drift.
	defer func() {
		if obj.GetArtifact().HasRevision(artifact.Revision) && !ociContentConfigChanged(obj) {
			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
				"stored artifact for digest '%s'", artifact.Revision)
		}
	}()

	// The artifact is up-to-date: same revision and no change to the
	// ignore/layer-selector configuration that shapes the artifact content.
	if obj.GetArtifact().HasRevision(artifact.Revision) && !ociContentConfigChanged(obj) {
		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason,
			"artifact up-to-date with remote revision: '%s'", artifact.Revision)
		return sreconcile.ResultSuccess, nil
	}

	// Ensure target path exists and is a directory
	if f, err := os.Stat(dir); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to stat source path: %w", err),
			sourcev1.StatOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	} else if !f.IsDir() {
		e := serror.NewGeneric(
			fmt.Errorf("source path '%s' is not a directory", dir),
			sourcev1.InvalidPathReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Ensure artifact directory exists and acquire lock
	if err := r.Storage.MkdirAll(artifact); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create artifact directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	unlock, err := r.Storage.Lock(artifact)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to acquire lock for artifact: %w", err),
			meta.FailedReason,
		)
	}
	defer unlock()

	switch obj.GetLayerOperation() {
	case sourcev1.OCILayerCopy:
		// The layer was already persisted verbatim under metadata.Path; copy
		// it into storage without re-archiving.
		if err = r.Storage.CopyFromPath(&artifact, filepath.Join(dir, metadata.Path)); err != nil {
			e := serror.NewGeneric(
				fmt.Errorf("unable to copy artifact to storage: %w", err),
				sourcev1.ArchiveOperationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	default:
		// Load ignore rules for archiving.
		ignoreDomain := strings.Split(dir, string(filepath.Separator))
		ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
		if err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(
				fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
				"SourceIgnoreError",
			)
		}
		if obj.Spec.Ignore != nil {
			ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
+ } + + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to archive artifact to storage: %s", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Record the observations on the object. + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.Artifact.Metadata = metadata.Metadata + obj.Status.ObservedIgnore = obj.Spec.Ignore + obj.Status.ObservedLayerSelector = obj.Spec.LayerSelector + + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + "failed to update status URL symlink: %s", err) + } + if url != "" { + obj.Status.URL = url + } + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. +// Removing the finalizer from the object if successful. +func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.OCIRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. 
+// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. +func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// notify emits notification related to the reconciliation. 
+func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) { + // Notify successful reconciliation for new artifact and recovery from any + // failure. + if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { + annotations := map[string]string{ + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, + } + + message := fmt.Sprintf("stored artifact with revision '%s' from '%s'", newObj.Status.Artifact.Revision, newObj.Spec.URL) + + // enrich message with upstream annotations if found + if info := newObj.GetArtifact().Metadata; info != nil { + var source, revision string + if val, ok := info[oci.SourceAnnotation]; ok { + source = val + } + if val, ok := info[oci.RevisionAnnotation]; ok { + revision = val + } + if source != "" && revision != "" { + message = fmt.Sprintf("%s, origin source '%s', origin revision '%s'", message, source, revision) + } + } + + // Notify on new artifact and failure recovery. + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + "NewArtifact", message) + ctrl.LoggerFrom(ctx).Info(message) + } else { + if sreconcile.FailureRecovery(oldObj, newObj, ociRepositoryFailConditions) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + meta.SucceededReason, message) + ctrl.LoggerFrom(ctx).Info(message) + } + } + } +} + +// makeRemoteOptions returns a remoteOptions struct with the authentication and transport options set. +// The returned struct can be used to interact with a remote registry using go-containerregistry based libraries. 
+func makeRemoteOptions(ctxTimeout context.Context, transport http.RoundTripper, + keychain authn.Keychain, auth authn.Authenticator) remoteOptions { + + authOption := remote.WithAuthFromKeychain(keychain) + if auth != nil { + // auth take precedence over keychain here as we expect the caller to set + // the auth only if it is required. + authOption = remote.WithAuth(auth) + } + return remoteOptions{ + remote.WithContext(ctxTimeout), + remote.WithUserAgent(oci.UserAgent), + remote.WithTransport(transport), + authOption, + } +} + +// remoteOptions contains the options to interact with a remote registry. +// It can be used to pass options to go-containerregistry based libraries. +type remoteOptions []remote.Option + +// ociContentConfigChanged evaluates the current spec with the observations +// of the artifact in the status to determine if artifact content configuration +// has changed and requires rebuilding the artifact. +func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool { + if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { + return true + } + + if !layerSelectorEqual(obj.Spec.LayerSelector, obj.Status.ObservedLayerSelector) { + return true + } + + return false +} + +// Returns true if both arguments are nil or both arguments +// dereference to the same value. +// Based on k8s.io/utils/pointer/pointer.go pointer value equality. 
func layerSelectorEqual(a, b *sourcev1.OCILayerSelector) bool {
	// Exactly one nil: not equal.
	if (a == nil) != (b == nil) {
		return false
	}
	// Both nil: equal.
	if a == nil {
		return true
	}
	return *a == *b
}

// filterTags returns a filterFunc that keeps only the tags matching the given
// regular expression. An empty filter keeps all tags; an invalid regular
// expression surfaces as an error when the returned function is invoked.
func filterTags(filter string) filterFunc {
	return func(tags []string) ([]string, error) {
		if filter == "" {
			return tags, nil
		}

		match, err := regexp.Compile(filter)
		if err != nil {
			return nil, err
		}

		validTags := []string{}
		for _, tag := range tags {
			if match.MatchString(tag) {
				validTags = append(validTags, tag)
			}
		}
		return validTags, nil
	}
}
diff --git a/internal/controller/ocirepository_controller_test.go b/internal/controller/ocirepository_controller_test.go
new file mode 100644
index 000000000..6ea35e962
--- /dev/null
+++ b/internal/controller/ocirepository_controller_test.go
@@ -0,0 +1,3709 @@
/*
Copyright 2022 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/ + +package controller + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/crane" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-core-go/signature/cose" + "github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" + "github.com/sigstore/cosign/v2/pkg/cosign" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + oras "oras.land/oras-go/v2/registry/remote" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + 
"github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/oci" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/tar" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + snotation "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +func TestOCIRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "ocirepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + ocirepo := &sourcev1.OCIRepository{} + ocirepo.Name = "test-ocirepo" + ocirepo.Namespace = namespaceName + ocirepo.Spec = sourcev1.OCIRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "oci://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + ocirepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, ocirepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, ocirepo)).NotTo(HaveOccurred()) + + r := &OCIRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(ocirepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestOCIRepository_Reconcile(t *testing.T) { + g := NewWithT(t) + + // Registry server with public images + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + t.Cleanup(func() { + regServer.Close() + }) + + podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + tests := []struct { + name string + url string + tag string + semver string + revision string + mediaType string + operation string + assertArtifact []artifactFixture + }{ + { + name: "public tag", + url: podinfoVersions["6.1.6"].url, + tag: podinfoVersions["6.1.6"].tag, + revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.6"].tag, podinfoVersions["6.1.6"].digest.String()), + mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + operation: sourcev1.OCILayerCopy, + assertArtifact: []artifactFixture{ + { + expectedPath: "kustomize/deployment.yaml", + expectedChecksum: "6fd625effe6bb805b6a78943ee082a4412e763edb7fcaed6e8fe644d06cbf423", + }, + { + expectedPath: "kustomize/hpa.yaml", + expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", + }, + }, + }, + { + name: "public semver", + url: podinfoVersions["6.1.5"].url, + semver: ">= 6.1 <= 6.1.5", + revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.5"].tag, podinfoVersions["6.1.5"].digest.String()), + assertArtifact: []artifactFixture{ + { + expectedPath: "kustomize/deployment.yaml", + expectedChecksum: "dce4f5f780a8e8994b06031e5b567bf488ceaaaabd9bd3fc278b4f3bfc8c577b", + }, + { + expectedPath: "kustomize/hpa.yaml", + expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-reconcile-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + origObj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{}, + Insecure: true, + }, + } + obj := origObj.DeepCopy() + + if tt.tag != "" { + obj.Spec.Reference.Tag = tt.tag + } + if tt.semver != "" { + obj.Spec.Reference.SemVer = tt.semver + } + if tt.mediaType != "" { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: tt.mediaType} + + if tt.operation != "" { + obj.Spec.LayerSelector.Operation = tt.operation + } + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the revision matches the expected revision + g.Expect(obj.Status.Artifact.Revision).To(Equal(tt.revision)) + + // Check if the metadata matches the expected annotations + g.Expect(obj.Status.Artifact.Metadata[oci.SourceAnnotation]).To(ContainSubstring("podinfo")) + g.Expect(obj.Status.Artifact.Metadata[oci.RevisionAnnotation]).To(ContainSubstring(tt.tag)) + + // Check if the artifact storage path matches the expected file path + localPath := testStorage.LocalPath(*obj.Status.Artifact) + t.Logf("artifact local path: %s", localPath) + + f, err := os.Open(localPath) + g.Expect(err).ToNot(HaveOccurred()) + defer f.Close() + + // create a tmp directory to extract artifact + tmp, err 
:= os.MkdirTemp("", "ocirepository-test-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmp) + + err = tar.Untar(f, tmp, tar.WithMaxUntarSize(-1)) + g.Expect(err).ToNot(HaveOccurred()) + + for _, af := range tt.assertArtifact { + expectedFile := filepath.Join(tmp, af.expectedPath) + g.Expect(expectedFile).To(BeAnExistingFile()) + + f2, err := os.Open(expectedFile) + g.Expect(err).ToNot(HaveOccurred()) + defer f2.Close() + + d, err := intdigest.Canonical.FromReader(f2) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(d.Encoded()).To(Equal(af.expectedChecksum)) + } + + // Check if the object status is valid + condns := &conditionscheck.Conditions{NegativePolarity: ociRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) + }) + } +} + +func TestOCIRepository_Reconcile_MediaType(t *testing.T) { + g := NewWithT(t) + + // Registry server with public images + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + t.Cleanup(func() { + regServer.Close() + }) + + podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + tests := []struct { + name string + url string + tag string + mediaType string + wantErr bool + }{ + { + name: "Works with no media type", + url: podinfoVersions["6.1.4"].url, + tag: podinfoVersions["6.1.4"].tag, + }, + { + name: "Works with Flux CLI media type", + url: podinfoVersions["6.1.5"].url, + tag: podinfoVersions["6.1.5"].tag, + mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + name: "Fails with unknown media type", + url: podinfoVersions["6.1.6"].url, + tag: podinfoVersions["6.1.6"].tag, + mediaType: "application/invalid.tar.gzip", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-mediatype-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{ + Tag: tt.tag, + }, + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: tt.mediaType, + }, + Insecure: true, + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for the 
finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be reconciled + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition != nil && !conditions.IsUnknown(obj, meta.ReadyCondition) + }, timeout).Should(BeTrue()) + + g.Expect(conditions.IsReady(obj)).To(BeIdenticalTo(!tt.wantErr)) + if tt.wantErr { + g.Expect(conditions.Get(obj, meta.ReadyCondition).Message).Should(ContainSubstring("failed to find layer with media type")) + } + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { + type secretOptions struct { + username string + password string + includeSA bool + includeSecret bool + } + + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + + tests := []struct { + name string + url string + registryOpts registryOptions + craneOpts []crane.Option + secretOpts secretOptions + tlsCertSecret *corev1.Secret + insecure bool + provider string + providerImg string + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without basic auth", + want: sreconcile.ResultSuccess, + craneOpts: []crane.Option{crane.Insecure}, + insecure: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP with 
basic auth secret", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSecret: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP with serviceaccount", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSA: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP registry - basic auth with missing secret", + want: sreconcile.ResultEmpty, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + wantErr: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), + }, + }, + { + name: "HTTP registry 
- basic auth with invalid secret", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + includeSecret: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), + }, + }, + { + name: "HTTP registry - basic auth with invalid serviceaccount", + want: sreconcile.ResultEmpty, + wantErr: true, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + includeSA: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), + }, + }, + { + name: "HTTPS with valid certfile", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTPS with valid certfile using deprecated keys", + want: 
sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTPS without certfile", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), + }, + }, + { + name: "HTTPS with invalid certfile", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to parse CA certificate"), + }, + }, + { + name: "HTTPS with certfile using both caFile and ca.crt ignores caFile", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: 
&tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "caFile": []byte("invalid"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "with contextual login provider", + wantErr: true, + provider: "aws", + providerImg: "oci://123456789000.dkr.ecr.us-east-2.amazonaws.com/test", + craneOpts: []crane.Option{ + crane.Insecure, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to get credential from"), + }, + }, + { + name: "secretRef takes precedence over provider", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSecret: true, + }, + insecure: true, + provider: "azure", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...) + g.Expect(err).ToNot(HaveOccurred()) + obj.Spec.URL = img.url + obj.Spec.Reference = &sourcev1.OCIRepositoryRef{ + Tag: img.tag, + } + + if tt.provider != "" { + obj.Spec.Provider = tt.provider + } + // If a provider specific image is provided, overwrite existing URL + // set earlier. It'll fail but it's necessary to set them because + // the login check expects the URLs to be of certain pattern. 
+ if tt.providerImg != "" { + obj.Spec.URL = tt.providerImg + } + + if tt.secretOpts.username != "" && tt.secretOpts.password != "" { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`, + server.registryHost, tt.secretOpts.username, tt.secretOpts.password)), + }, + } + clientBuilder.WithObjects(secret) + + if tt.secretOpts.includeSA { + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sa-ocitest", + }, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: secret.Name}}, + } + clientBuilder.WithObjects(serviceAccount) + obj.Spec.ServiceAccountName = serviceAccount.Name + } + + if tt.secretOpts.includeSecret { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: secret.Name, + } + } + } + + if tt.tlsCertSecret != nil { + clientBuilder.WithObjects(tt.tlsCertSecret) + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.tlsCertSecret.Name, + } + } + if tt.insecure { + obj.Spec.Insecure = true + } + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + opts := makeRemoteOptions(ctx, makeTransport(tt.insecure), authn.DefaultKeychain, nil) + ref, err := r.getArtifactRef(obj, opts) + g.Expect(err).To(BeNil()) + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", img.tag, img.digest.String())) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", ref.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) 
+ }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + tmpDir := t.TempDir() + got, err := r.reconcileSource(ctx, sp, obj, &meta.Artifact{}, tmpDir) + if tt.wantErr { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func makeTransport(insecure bool) http.RoundTripper { + transport := remote.DefaultTransport.(*http.Transport).Clone() + if insecure { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} +func TestOCIRepository_CertSecret(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{ + withTLS: true, + withClientCertAuth: true, + }) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + regServer.Close() + }) + + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + clientTLSCert, err := tls.X509KeyPair(clientPublicKey, clientPrivateKey) + g.Expect(err).ToNot(HaveOccurred()) + + transport := http.DefaultTransport.(*http.Transport) + transport.TLSClientConfig = &tls.Config{ + RootCAs: pool, + Certificates: []tls.Certificate{clientTLSCert}, + } + pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", regServer.registryHost, []crane.Option{ + crane.WithTransport(transport), + }...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + tlsSecretClientCert := corev1.Secret{ + Data: map[string][]byte{ + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": clientPrivateKey, + }, + } + + tests := []struct { + name string + url string + digest gcrv1.Hash + certSecret *corev1.Secret + expectreadyconition bool + expectedstatusmessage string + }{ + { + name: "test connection with CACert, Client Cert and Private Key", + url: pi.url, + digest: pi.digest, + certSecret: &tlsSecretClientCert, + expectreadyconition: true, + expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.String()), + }, + { + name: "test connection with no secret", + url: pi.url, + digest: pi.digest, + expectreadyconition: false, + expectedstatusmessage: "tls: failed to verify certificate: x509:", + }, + { + name: "test connection with with incorrect private key", + url: pi.url, + digest: pi.digest, + certSecret: &corev1.Secret{ + Data: map[string][]byte{ + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": []byte("invalid-key"), + }, + }, + expectreadyconition: false, + expectedstatusmessage: "failed to generate transport for '': failed to parse TLS certificate and key: tls: failed to find any PEM data in key input", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-test-resource", + Namespace: ns.Name, + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, + }, + } + + if tt.certSecret != nil { + tt.certSecret.ObjectMeta = metav1.ObjectMeta{ + GenerateName: "cert-secretref", + Namespace: ns.Name, 
+ } + + g.Expect(testEnv.CreateAndWait(ctx, tt.certSecret)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, tt.certSecret)).To(Succeed()) }() + + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: tt.certSecret.Name} + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + resultobj := sourcev1.OCIRepository{} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + return len(resultobj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + if readyCondition == nil || conditions.IsUnknown(&resultobj, meta.ReadyCondition) { + return false + } + return obj.Generation == readyCondition.ObservedGeneration && + conditions.IsReady(&resultobj) == tt.expectreadyconition + }, timeout).Should(BeTrue()) + + tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) + + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_ProxySecret(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + regServer.Close() + }) + + pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", regServer.registryHost) + 
g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + url string + digest gcrv1.Hash + proxySecret *corev1.Secret + expectreadyconition bool + expectedstatusmessage string + }{ + { + name: "test proxied connection", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://%s", proxyAddr)), + }, + }, + expectreadyconition: true, + expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.String()), + }, + { + name: "test proxy connection error", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://localhost:%d", proxyPort+1)), + }, + }, + expectreadyconition: false, + expectedstatusmessage: "failed to pull artifact", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-test-resource", + Namespace: ns.Name, + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, + }, + } + + if tt.proxySecret != nil { + tt.proxySecret.ObjectMeta = metav1.ObjectMeta{ + GenerateName: "proxy-secretref", + Namespace: ns.Name, + } + + g.Expect(testEnv.CreateAndWait(ctx, tt.proxySecret)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, tt.proxySecret)).To(Succeed()) }() + + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{Name: tt.proxySecret.Name} + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + 
// Tail of the enclosing table-driven test (function opens above this chunk):
// after reconciliation, wait for finalizer + Ready condition, check the Ready
// message, then delete the object and wait for it to disappear from testEnv.
			resultobj := sourcev1.OCIRepository{}

			// Wait for the finalizer to be set
			g.Eventually(func() bool {
				if err := testEnv.Get(ctx, key, &resultobj); err != nil {
					return false
				}
				return len(resultobj.Finalizers) > 0
			}, timeout).Should(BeTrue())

			// Wait for the object to be ready
			g.Eventually(func() bool {
				if err := testEnv.Get(ctx, key, &resultobj); err != nil {
					return false
				}
				readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
				if readyCondition == nil || conditions.IsUnknown(&resultobj, meta.ReadyCondition) {
					return false
				}
				return obj.Generation == readyCondition.ObservedGeneration &&
					conditions.IsReady(&resultobj) == tt.expectreadyconition
			}, timeout).Should(BeTrue())

			// NOTE(review): ReplaceAll with an empty-string old-substring is a
			// no-op; this looks like a placeholder token (presumably "<url>")
			// lost in extraction — confirm against upstream before relying on it.
			tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url)

			readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
			g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage))

			// Wait for the object to be deleted
			g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed())
			g.Eventually(func() bool {
				if err := testEnv.Get(ctx, key, &resultobj); err != nil {
					return apierrors.IsNotFound(err)
				}
				return false
			}, timeout).Should(BeTrue())
		})
	}
}

// TestOCIRepository_reconcileSource_remoteReference exercises reconcileSource
// against a local registry seeded with three podinfo tags, covering tag,
// semver, and digest references, their precedence (digest > semver > tag),
// and failure modes for unresolvable references.
func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) {
	g := NewWithT(t)

	tmpDir := t.TempDir()
	server, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
	g.Expect(err).ToNot(HaveOccurred())
	t.Cleanup(func() {
		server.Close()
	})

	// Push 6.1.4/6.1.5/6.1.6 so tag, semver and digest lookups have material.
	podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, "6.1.4", "6.1.5", "6.1.6")
	g.Expect(err).ToNot(HaveOccurred())

	img6 := podinfoVersions["6.1.6"]
	img5 := podinfoVersions["6.1.5"]

	tests := []struct {
		name             string
		reference        *sourcev1.OCIRepositoryRef
		want             sreconcile.Result
		wantErr          bool
		wantRevision     string
		assertConditions []metav1.Condition
	}{
		{
			name:         "no reference (latest tag)",
			want:         sreconcile.ResultSuccess,
			wantRevision: fmt.Sprintf("latest@%s", img6.digest.String()),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
		{
			name: "tag reference",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.6",
			},
			want:         sreconcile.ResultSuccess,
			wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
		{
			name: "semver reference",
			reference: &sourcev1.OCIRepositoryRef{
				SemVer: ">= 6.1.5",
			},
			want:         sreconcile.ResultSuccess,
			wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
		{
			name: "digest reference",
			reference: &sourcev1.OCIRepositoryRef{
				Digest: img6.digest.String(),
			},
			wantRevision: img6.digest.String(),
			want:         sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
		{
			name: "invalid tag reference",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.0",
			},
			want:    sreconcile.ResultEmpty,
			wantErr: true,
			assertConditions: []metav1.Condition{
				// NOTE(review): the leading space before MANIFEST_UNKNOWN
				// suggests a stripped placeholder in this message — verify
				// against the upstream file.
				*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, " MANIFEST_UNKNOWN"),
			},
		},
		{
			name: "invalid semver reference",
			reference: &sourcev1.OCIRepositoryRef{
				SemVer: "<= 6.1.0",
			},
			want:    sreconcile.ResultEmpty,
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost),
			},
		},
		{
			name: "invalid digest reference",
			reference: &sourcev1.OCIRepositoryRef{
				Digest: "invalid",
			},
			want:    sreconcile.ResultEmpty,
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to determine artifact digest"),
			},
		},
		{
			// Semver wins over tag: revision resolves to 6.1.6, not 6.1.5.
			name: "semver should take precedence over tag",
			reference: &sourcev1.OCIRepositoryRef{
				SemVer: ">= 6.1.5",
				Tag:    "6.1.5",
			},
			want:         sreconcile.ResultSuccess,
			wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
		{
			// Digest wins over both tag and semver: revision is img5's digest.
			name: "digest should take precedence over semver",
			reference: &sourcev1.OCIRepositoryRef{
				Tag:    "6.1.6",
				SemVer: ">= 6.1.6",
				Digest: img5.digest.String(),
			},
			want:         sreconcile.ResultSuccess,
			wantRevision: img5.digest.String(),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"),
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "checkout-strategy-",
					Generation:   1,
				},
				Spec: sourcev1.OCIRepositorySpec{
					URL:      fmt.Sprintf("oci://%s/podinfo", server.registryHost),
					Interval: metav1.Duration{Duration: interval},
					Timeout:  &metav1.Duration{Duration: timeout},
					Insecure: true,
				},
			}

			if tt.reference != nil {
				obj.Spec.Reference = tt.reference
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			// Drive reconcileSource directly and check result, revision and
			// the conditions it leaves on the object.
			artifact := &meta.Artifact{}
			tmpDir := t.TempDir()
			got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
				g.Expect(artifact.Revision).To(Equal(tt.wantRevision))
			}

			g.Expect(got).To(Equal(tt.want))
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
		})
	}
}

// TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation covers
// Notation signature verification in reconcileSource: signed vs unsigned
// images, secure/insecure registries, digest references, multiple/missing
// certificates in the verification secret, and re-verification semantics
// when the object generation changes or spec.verify is removed.
func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testing.T) {
	g := NewWithT(t)

	// NOTE(review): many expected condition messages below contain empty
	// quotes ('' / "") where template tokens (presumably "<revision>",
	// "<url>", "<provider>") appear to have been stripped during extraction;
	// the ReplaceAll calls later in this test replace those tokens. Confirm
	// against the upstream file before editing these literals.
	tests := []struct {
		name             string
		reference        *sourcev1.OCIRepositoryRef
		insecure         bool
		want             sreconcile.Result
		wantErr          bool
		wantErrMsg       string
		shouldSign       bool
		useDigest        bool
		addMultipleCerts bool
		provideNoCert    bool
		beforeFunc       func(obj *sourcev1.OCIRepository, tag, revision string)
		assertConditions []metav1.Condition
	}{
		{
			name: "signed image should pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			shouldSign: true,
			want:       sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			name: "unsigned image should not pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.5",
			},
			wantErr:    true,
			useDigest:  true,
			wantErrMsg: "failed to verify the signature using provider 'notation': no signature is associated with \"\"",
			want:       sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no signature is associated with \"\", make sure the artifact was signed successfully"),
			},
		},
		{
			// Dropping spec.verify must clear a stale SourceVerified=False.
			name:      "verify failed before, removed from spec, remove condition",
			reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"},
			beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) {
				conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg")
				obj.Spec.Verify = nil
				obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)}
			},
			want: sreconcile.ResultSuccess,
		},
		{
			// A generation bump invalidates the prior verification.
			name:       "same artifact, verified before, change in obj gen verify again",
			reference:  &sourcev1.OCIRepositoryRef{Tag: "6.1.4"},
			shouldSign: true,
			beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) {
				obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)}
				// Set Verified with old observed generation and different reason/message.
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified")
				// Set new object generation.
				obj.SetGeneration(3)
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			// Unchanged artifact + unchanged generation: no re-verification,
			// custom condition reason/message must survive.
			name:       "no verify for already verified, verified condition remains the same",
			reference:  &sourcev1.OCIRepositoryRef{Tag: "6.1.4"},
			shouldSign: true,
			beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) {
				// Artifact present and custom verified condition reason/message.
				obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)}
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified")
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Verified", "verified"),
			},
		},
		{
			name: "signed image on an insecure registry passes verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.6",
			},
			shouldSign: true,
			insecure:   true,
			want:       sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			name: "signed image on an insecure registry using digest as reference passes verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.6",
			},
			shouldSign: true,
			insecure:   true,
			useDigest:  true,
			want:       sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			// Extra non-signing certs in the secret must not break matching
			// against the actual signing cert.
			name: "verification level audit and correct trust identity should pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.6",
			},
			shouldSign:       true,
			insecure:         true,
			useDigest:        true,
			want:             sreconcile.ResultSuccess,
			addMultipleCerts: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			name: "no cert provided should not pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.5",
			},
			wantErr:       true,
			useDigest:     true,
			provideNoCert: true,
			// no namespace but the namespace name should appear before the /notation-config
			wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config",
			want:       sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no certificates found in secret '/notation-config"),
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	// Self-signed signing identity shared by all sub-tests.
	certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing")
	certs := []*x509.Certificate{certTuple.Cert}

	signer, err := signer.New(certTuple.PrivateKey, certs)
	g.Expect(err).ToNot(HaveOccurred())

	// Strict verification, but skip revocation checks (no OCSP in tests).
	policyDocument := trustpolicy.Document{
		Version: "1.0",
		TrustPolicies: []trustpolicy.TrustPolicy{
			{
				Name:                  "test-statement-name",
				RegistryScopes:        []string{"*"},
				SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}},
				TrustStores:           []string{"ca:valid-trust-store"},
				TrustedIdentities:     []string{"*"},
			},
		},
	}

	tmpDir := t.TempDir()

	policy, err := json.Marshal(policyDocument)
	g.Expect(err).NotTo(HaveOccurred())

	// CA secret used as CertSecretRef for the TLS registry variants.
	caSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "valid-trust-store",
			Generation: 1,
		},
		Data: map[string][]byte{
			"ca.crt": tlsCA,
		},
	}

	g.Expect(r.Create(ctx, caSecret)).ToNot(HaveOccurred())

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Fresh registry per sub-test; TLS unless the case is insecure.
			workspaceDir := t.TempDir()
			server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{
				withTLS: !tt.insecure,
			})
			g.Expect(err).NotTo(HaveOccurred())
			t.Cleanup(func() {
				server.Close()
			})

			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "verify-oci-source-signature-",
					Generation:   1,
				},
				Spec: sourcev1.OCIRepositorySpec{
					URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost),
					Verify: &sourcev1.OCIRepositoryVerification{
						Provider: "notation",
					},
					Interval: metav1.Duration{Duration: interval},
					Timeout:  &metav1.Duration{Duration: timeout},
				},
			}

			// Build the notation verification secret: trust policy plus the
			// configured mix of certificates for the case.
			data := map[string][]byte{}

			if tt.addMultipleCerts {
				data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw
				data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw
				data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw
			}

			if !tt.provideNoCert {
				data["notation.crt"] = certTuple.Cert.Raw
			}

			data["trustpolicy.json"] = policy

			secret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "notation-config-",
				},
				Data: data,
			}

			g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred())

			if tt.insecure {
				obj.Spec.Insecure = true
			} else {
				obj.Spec.CertSecretRef = &meta.LocalObjectReference{
					Name: "valid-trust-store",
				}
			}

			obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()}

			if tt.reference != nil {
				obj.Spec.Reference = tt.reference
			}

			podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, tt.insecure, tt.reference.Tag)
			g.Expect(err).ToNot(HaveOccurred())

			if tt.useDigest {
				obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String()
			}

			keychain, err := r.keychain(ctx, obj)
			if err != nil {
				g.Expect(err).ToNot(HaveOccurred())
			}

			opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil)

			artifactRef, err := r.getArtifactRef(obj, opts)
			g.Expect(err).ToNot(HaveOccurred())

			// Sign the pushed artifact with notation when the case expects a
			// valid signature to exist.
			if tt.shouldSign {
				remoteRepo, err := oras.NewRepository(artifactRef.String())
				g.Expect(err).ToNot(HaveOccurred())

				if tt.insecure {
					remoteRepo.PlainHTTP = true
				}

				repo := registry.NewRepository(remoteRepo)

				signatureMediaType := cose.MediaTypeEnvelope

				signOptions := notation.SignOptions{
					SignerSignOptions: notation.SignerSignOptions{
						SignatureMediaType: signatureMediaType,
					},
					ArtifactReference: artifactRef.String(),
				}

				_, err = notation.Sign(ctx, signer, repo, signOptions)
				g.Expect(err).ToNot(HaveOccurred())
			}

			// Template the expected messages with the concrete revision/URL.
			// NOTE(review): the empty-string old-substrings below are almost
			// certainly stripped placeholder tokens — confirm upstream.
			image := podinfoVersions[tt.reference.Tag]
			assertConditions := tt.assertConditions
			for k := range assertConditions {
				if tt.useDigest {
					assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String())
				} else {
					assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String()))
				}
				assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String())
				assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation")
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj, image.tag, image.digest.String())
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
				g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			artifact := &meta.Artifact{}
			got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir)
			if tt.wantErr {
				tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String())
				g.Expect(err).ToNot(BeNil())
				g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg))
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}
			g.Expect(got).To(Equal(tt.want))
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
		})
	}
}

// TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation covers
// how the notation trust policy shapes verification outcomes: audit vs
// permissive vs skip levels, trusted-identity matching, and malformed or
// missing trust policy documents in the verification secret.
func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *testing.T) {
	g := NewWithT(t)

	// NOTE(review): as in the sibling notation test, the '' and "" literals in
	// the expected messages and the empty-string ReplaceAll arguments look
	// like stripped template tokens — confirm against upstream.
	tests := []struct {
		name                  string
		reference             *sourcev1.OCIRepositoryRef
		signatureVerification trustpolicy.SignatureVerification
		trustedIdentities     []string
		trustStores           []string
		want                  sreconcile.Result
		wantErr               bool
		wantErrMsg            string
		useDigest             bool
		usePolicyJson         bool
		provideNoPolicy       bool
		policyJson            string
		beforeFunc            func(obj *sourcev1.OCIRepository, tag, revision string)
		assertConditions      []metav1.Condition
	}{
		{
			// Audit level: identity mismatch is logged, not fatal.
			name: "verification level audit and incorrect trust identity should fail verification but not error",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name},
			trustedIdentities:     []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"},
			trustStores:           []string{"ca:valid-trust-store"},
			want:                  sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
			},
		},
		{
			// Permissive level: identity mismatch is a hard failure.
			name: "verification level permissive and incorrect trust identity should fail verification and error",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name},
			trustedIdentities:     []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"},
			trustStores:           []string{"ca:valid-trust-store"},
			useDigest:             true,
			want:                  sreconcile.ResultEmpty,
			wantErr:               true,
			wantErrMsg:            "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\"",
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\""),
			},
		},
		{
			name: "verification level permissive and correct trust identity should pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name},
			trustedIdentities:     []string{"*"},
			trustStores:           []string{"ca:valid-trust-store"},
			want:                  sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			name: "verification level audit and correct trust identity should pass verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name},
			trustedIdentities:     []string{"*"},
			trustStores:           []string{"ca:valid-trust-store"},
			want:                  sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "),
			},
		},
		{
			// Skip level: reconciliation succeeds but no SourceVerified
			// condition is added.
			name: "verification level skip and should not be marked as verified",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelSkip.Name},
			trustedIdentities:     []string{},
			want:                  sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
			},
		},
		{
			name: "valid json but empty policy json should fail verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			usePolicyJson: true,
			policyJson:    "{}",
			wantErr:       true,
			wantErrMsg:    "trust policy document has empty version",
			want:          sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "trust policy document has empty version, version must be specified"),
			},
		},
		{
			name: "empty string should fail verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			usePolicyJson: true,
			policyJson:    "",
			wantErr:       true,
			wantErrMsg:    fmt.Sprintf("error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey),
			want:          sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey),
			},
		},
		{
			name: "invalid character in string should fail verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			usePolicyJson: true,
			policyJson:    "{\"version\": \"1.0\u000A\", \"trust_policies\": []}",
			wantErr:       true,
			wantErrMsg:    fmt.Sprintf("error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey),
			want:          sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey),
			},
		},
		{
			// NOTE(review): duplicate test name with the empty-policyJson case
			// above; this one exercises a missing trust policy key. Consider
			// renaming to something like "no policy provided should fail
			// verification" (string change deliberately not made here).
			name: "empty string should fail verification",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.4",
			},
			provideNoPolicy: true,
			wantErr:         true,
			wantErrMsg:      fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey),
			want:            sreconcile.ResultEmpty,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"),
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey),
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing")
	certs := []*x509.Certificate{certTuple.Cert}

	signer, err := signer.New(certTuple.PrivateKey, certs)
	g.Expect(err).ToNot(HaveOccurred())

	tmpDir := t.TempDir()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			workspaceDir := t.TempDir()
			server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{})
			g.Expect(err).NotTo(HaveOccurred())
			t.Cleanup(func() {
				server.Close()
			})

			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "verify-oci-source-signature-",
					Generation:   1,
				},
				Spec: sourcev1.OCIRepositorySpec{
					URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost),
					Verify: &sourcev1.OCIRepositoryVerification{
						Provider: "notation",
					},
					Interval: metav1.Duration{Duration: interval},
					Timeout:  &metav1.Duration{Duration: timeout},
				},
			}

			// Build the trust policy: either from the case's structured
			// fields or from a raw (possibly malformed) JSON string.
			var policy []byte

			if !tt.usePolicyJson {
				policyDocument := trustpolicy.Document{
					Version: "1.0",
					TrustPolicies: []trustpolicy.TrustPolicy{
						{
							Name:                  "test-statement-name",
							RegistryScopes:        []string{"*"},
							SignatureVerification: tt.signatureVerification,
							TrustStores:           tt.trustStores,
							TrustedIdentities:     tt.trustedIdentities,
						},
					},
				}

				policy, err = json.Marshal(policyDocument)
				g.Expect(err).NotTo(HaveOccurred())
			} else {
				policy = []byte(tt.policyJson)
			}

			data := map[string][]byte{}

			if !tt.provideNoPolicy {
				data["trustpolicy.json"] = policy
			}

			data["notation.crt"] = certTuple.Cert.Raw

			secret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "notation-",
				},
				Data: data,
			}

			g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred())

			obj.Spec.Insecure = true

			obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()}

			if tt.reference != nil {
				obj.Spec.Reference = tt.reference
			}

			podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, tt.reference.Tag)
			g.Expect(err).ToNot(HaveOccurred())

			if tt.useDigest {
				obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String()
			}

			keychain, err := r.keychain(ctx, obj)
			if err != nil {
				g.Expect(err).ToNot(HaveOccurred())
			}

			opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil)

			artifactRef, err := r.getArtifactRef(obj, opts)
			g.Expect(err).ToNot(HaveOccurred())

			// Every case signs the image; the outcome is driven purely by
			// the trust policy, not by signature presence.
			remoteRepo, err := oras.NewRepository(artifactRef.String())
			g.Expect(err).ToNot(HaveOccurred())

			remoteRepo.PlainHTTP = true

			repo := registry.NewRepository(remoteRepo)

			signatureMediaType := cose.MediaTypeEnvelope

			signOptions := notation.SignOptions{
				SignerSignOptions: notation.SignerSignOptions{
					SignatureMediaType: signatureMediaType,
				},
				ArtifactReference: artifactRef.String(),
			}

			_, err = notation.Sign(ctx, signer, repo, signOptions)
			g.Expect(err).ToNot(HaveOccurred())

			// Resolve the single signature manifest's digest so it can be
			// substituted into expected messages.
			image := podinfoVersions[tt.reference.Tag]
			signatureDigest := ""

			artifactDescriptor, err := repo.Resolve(ctx, image.tag)
			g.Expect(err).ToNot(HaveOccurred())
			_ = repo.ListSignatures(ctx, artifactDescriptor, func(signatureManifests []ocispec.Descriptor) error {
				g.Expect(len(signatureManifests)).Should(Equal(1))
				signatureDigest = signatureManifests[0].Digest.String()
				return nil
			})

			// NOTE(review): empty-string old-substrings below are presumably
			// stripped placeholder tokens — confirm upstream.
			assertConditions := tt.assertConditions
			for k := range assertConditions {
				if tt.useDigest {
					assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String())
				} else {
					assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String()))
				}

				if signatureDigest != "" {
					assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", signatureDigest)
				}
				assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String())
				assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation")
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj, image.tag, image.digest.String())
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			artifact := &meta.Artifact{}
			got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir)
			g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred())
			if tt.wantErr {
				tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String())
				if signatureDigest != "" {
					tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", signatureDigest)
				}
				g.Expect(err).ToNot(BeNil())
				g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg))
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}
			g.Expect(got).To(Equal(tt.want))
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
		})
	}
}

// Head of the cosign verification test; its body continues beyond this chunk.
func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing.T) {
	g := NewWithT(t)

	tests := []struct {
		name             string
		reference        *sourcev1.OCIRepositoryRef
		insecure         bool
		want             sreconcile.Result
		wantErr          bool
		wantErrMsg       string
		shouldSign       bool
		keyless          bool
		beforeFunc       func(obj *sourcev1.OCIRepository, tag, revision string)
		assertConditions
[]metav1.Condition + }{ + { + name: "signed image should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + shouldSign: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + wantErrMsg: "failed to verify the signature using provider 'cosign': no matching signatures were found for ''", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no matching signatures were found for ''"), + }, + }, + { + name: "unsigned image should not pass keyless verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + keyless: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' 
keyless': no signatures found"), + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Spec.Verify = nil + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + }, + want: sreconcile.ResultSuccess, + }, + { + name: "same artifact, verified before, change in obj gen verify again", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + // Set Verified with old observed generation and different reason/message. + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + // Set new object generation. + obj.SetGeneration(3) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no verify for already verified, verified condition remains the same", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + // Artifact present and custom verified condition reason/message. 
+ obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Verified", "verified"), + }, + }, + { + name: "signed image on an insecure registry passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + pf := func(b bool) ([]byte, error) { + return []byte("cosign-password"), nil + } + + keys, err := cosign.GenerateKeyPair(pf) + g.Expect(err).ToNot(HaveOccurred()) + + tmpDir := t.TempDir() + err = os.WriteFile(path.Join(tmpDir, "cosign.key"), keys.PrivateBytes, 0600) + g.Expect(err).ToNot(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cosign-key", + }, + Data: map[string][]byte{ + "cosign.pub": keys.PublicBytes, + }} + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-cert-cosign", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + g.Expect(r.Create(ctx, caSecret)).ToNot(HaveOccurred()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workspaceDir := t.TempDir() + regOpts := registryOptions{ + withTLS: !tt.insecure, + } + server, err := setupRegistryServer(ctx, workspaceDir, regOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + if tt.insecure { + obj.Spec.Insecure = true + } else { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "ca-cert-cosign", + } + } + + if !tt.keyless { + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: 
"cosign-key"} + } + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, tt.insecure, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.shouldSign { + ko := coptions.KeyOpts{ + KeyRef: path.Join(tmpDir, "cosign.key"), + PassFunc: pf, + } + + ro := &coptions.RootOptions{ + Timeout: timeout, + } + err = sign.SignCmd(ro, ko, coptions.SignOptions{ + Upload: true, + SkipConfirmation: true, + TlogUpload: false, + + Registry: coptions.RegistryOptions{Keychain: keychain, AllowInsecure: true, AllowHTTPRegistry: tt.insecure}, + }, []string{artifactRef.String()}) + + g.Expect(err).ToNot(HaveOccurred()) + } + + image := podinfoVersions[tt.reference.Tag] + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + g.Expect(err).ToNot(BeNil()) + 
g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + want sreconcile.Result + wantErr bool + wantErrMsg string + beforeFunc func(obj *sourcev1.OCIRepository) + assertConditions []metav1.Condition + revision string + }{ + { + name: "signed image with no identity matching specified should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with correct subject and issuer should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' 
for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with both incorrect and correct identity matchers should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with incorrect subject and issuer should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no matching signatures: none of the expected identities matched what was in the certificate"), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.0", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no signatures found"), + }, + revision: "6.1.0@sha256:3816fe9636a297f0c934b1fa0f46fe4c068920375536ac2803604adfb4c55894", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Reference: tt.reference, + }, + } + url := strings.TrimPrefix(obj.Spec.URL, "oci://") + ":" + tt.reference.Tag + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", tt.revision) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", url) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, t.TempDir()) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", url) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_noop(t 
*testing.T) { + g := NewWithT(t) + + testRevision := "6.1.5@sha256:8e4057c22d531d40e12b065443cb0d80394b7257c4dc557cb1fbd4dce892b86d" + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + _, err = pushMultiplePodinfoImages(server.registryHost, true, "6.1.5") + g.Expect(err).ToNot(HaveOccurred()) + + // NOTE: The following verifies if it was a noop run by checking the + // artifact metadata which is unknown unless the image is pulled. + + tests := []struct { + name string + beforeFunc func(obj *sourcev1.OCIRepository) + afterFunc func(g *WithT, artifact *meta.Artifact) + }{ + { + name: "full reconcile - no existing artifact", + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).ToNot(BeEmpty()) + }, + }, + { + name: "noop - artifact revisions match", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).To(BeEmpty()) + }, + }, + { + name: "full reconcile - same rev, unobserved ignore", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.ObservedIgnore = ptr.To("aaa") + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).ToNot(BeEmpty()) + }, + }, + { + name: "noop - same rev, observed ignore", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Ignore = ptr.To("aaa") + obj.Status.ObservedIgnore = ptr.To("aaa") + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).To(BeEmpty()) + }, + }, + { + name: "full reconcile - same rev, unobserved layer selector", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = 
&sourcev1.OCILayerSelector{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Operation: sourcev1.OCILayerCopy, + } + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).ToNot(BeEmpty()) + }, + }, + { + name: "noop - same rev, observed layer selector", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Operation: sourcev1.OCILayerCopy, + } + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Operation: sourcev1.OCILayerCopy, + } + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).To(BeEmpty()) + }, + }, + { + name: "full reconcile - same rev, observed layer selector changed", + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Operation: sourcev1.OCILayerExtract, + } + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Operation: sourcev1.OCILayerCopy, + } + obj.Status.Artifact = &meta.Artifact{ + Revision: testRevision, + } + }, + afterFunc: func(g *WithT, artifact *meta.Artifact) { + g.Expect(artifact.Metadata).ToNot(BeEmpty()) + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "noop-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.5"}, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Insecure: true, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + tmpDir := t.TempDir() + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultSuccess)) + + if tt.afterFunc != nil { + tt.afterFunc(g, artifact) + } + }) + } +} + +func TestOCIRepository_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + targetPath string + artifact *meta.Artifact + beforeFunc func(obj *sourcev1.OCIRepository) + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertPaths []string + assertConditions []metav1.Condition + afterFunc func(g *WithT, obj *sourcev1.OCIRepository) + }{ + { + name: "Archiving Artifact creates correct files and condition", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{ + Revision: "revision", + }, + beforeFunc: func(obj *sourcev1.OCIRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new revision") + }, + want: 
sreconcile.ResultSuccess, + assertPaths: []string{ + "latest.tar.gz", + }, + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { + g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:6a5bd135a816ec0ad246c41cfdd87629e40ef6520001aeb2d0118a703abe9e7a")) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "Artifact with source ignore", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{Revision: "revision"}, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Ignore = ptr.To("foo.txt") + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "latest.tar.gz", + }, + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { + g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:9102e9c8626e48821a91a4963436f1673cd85f8fb3deb843c992f85b995c38ea")) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "No status changes if artifact is already present", + artifact: &meta.Artifact{ + Revision: "revision", + }, + targetPath: "testdata/oci/repository", + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "revision", + } + }, + assertArtifact: &meta.Artifact{ + Revision: "revision", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "Artifact already present, unobserved ignore, rebuild artifact", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{ + Revision: "revision", + }, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} + obj.Spec.Ignore = ptr.To("aaa") + }, + want: 
sreconcile.ResultSuccess, + assertPaths: []string{ + "latest.tar.gz", + }, + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { + g.Expect(*obj.Status.ObservedIgnore).To(Equal("aaa")) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "Artifact already present, unobserved layer selector, rebuild artifact", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{ + Revision: "revision", + }, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "latest.tar.gz", + }, + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { + g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "Artifact already present, observed layer selector changed, rebuild artifact", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{ + Revision: "revision", + Path: "foo.txt", + }, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerCopy, + } + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "latest.tar.gz", + }, + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { + g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) + g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(sourcev1.OCILayerCopy)) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, 
"stored artifact for digest"), + }, + }, + { + name: "Artifact already present, observed ignore and layer selector, up-to-date", + targetPath: "testdata/oci/repository", + artifact: &meta.Artifact{ + Revision: "revision", + }, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Ignore = ptr.To("aaa") + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} + obj.Status.ObservedIgnore = ptr.To("aaa") + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + }, + want: sreconcile.ResultSuccess, + assertArtifact: &meta.Artifact{ + Revision: "revision", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), + }, + }, + { + name: "target path doesn't exist", + targetPath: "testdata/oci/non-existent", + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path: "), + }, + }, + { + name: "target path is a file", + targetPath: "testdata/oci/repository/foo.txt", + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "source path 'testdata/oci/repository/foo.txt' is not a directory"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + _ = resetChmod(tt.targetPath, 0o755, 0o644) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "reconcile-artifact-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &meta.Artifact{} + if tt.artifact != nil { + artifact = tt.artifact + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(ctx, sp, obj, artifact, tt.targetPath) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.assertArtifact != nil { + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.artifact)) + } + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + + for _, p := range tt.assertPaths { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + p = filepath.Join(filepath.Dir(localPath), p) + _, err := os.Lstat(p) + g.Expect(err).ToNot(HaveOccurred()) + } + }) + } +} + +func TestOCIRepository_getArtifactRef(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + imgs, err := pushMultiplePodinfoImages(server.registryHost, true, + "6.1.4", + "6.1.5-beta.1", + "6.1.5-rc.1", + "6.1.5", + "6.1.6-rc.1", + "6.1.6", + ) + g.Expect(err).ToNot(HaveOccurred()) 
+ + tests := []struct { + name string + url string + reference *sourcev1.OCIRepositoryRef + wantErr bool + want string + }{ + { + name: "valid url with no reference", + url: "oci://ghcr.io/stefanprodan/charts", + want: "ghcr.io/stefanprodan/charts:latest", + }, + { + name: "valid url with tag reference", + url: "oci://ghcr.io/stefanprodan/charts", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + want: "ghcr.io/stefanprodan/charts:6.1.6", + }, + { + name: "valid url with digest reference", + url: "oci://ghcr.io/stefanprodan/charts", + reference: &sourcev1.OCIRepositoryRef{ + Digest: imgs["6.1.6"].digest.String(), + }, + want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.String(), + }, + { + name: "valid url with semver reference", + url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.6", + }, + want: server.registryHost + "/podinfo:6.1.6", + }, + { + name: "invalid url without oci prefix", + url: "ghcr.io/stefanprodan/charts", + wantErr: true, + }, + { + name: "valid url with semver filter", + url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.x-0", + SemverFilter: ".*-rc.*", + }, + want: server.registryHost + "/podinfo:6.1.6-rc.1", + }, + { + name: "valid url with semver filter and unexisting version", + url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.x-0", + SemverFilter: ".*-alpha.*", + }, + wantErr: true, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "artifact-url-", + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Insecure: true, + }, + } + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + opts := makeRemoteOptions(ctx, makeTransport(true), authn.DefaultKeychain, nil) + got, err := r.getArtifactRef(obj, opts) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got.String()).To(Equal(tt.want)) + }) + } +} + +func TestOCIRepository_invalidURL(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-invalid-url-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/test/test:v1", + Interval: metav1.Duration{Duration: 60 * time.Minute}, + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + resultobj := sourcev1.OCIRepository{} + + // Wait for the object to fail + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + if readyCondition == nil { + return false + } + return obj.Generation == readyCondition.ObservedGeneration && + 
!conditions.IsUnknown(&resultobj, meta.ReadyCondition) + }, timeout).Should(BeTrue()) + + // Verify that stalled condition is present in status + stalledCondition := conditions.Get(&resultobj, meta.StalledCondition) + g.Expect(stalledCondition).ToNot(BeNil()) + g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason)) +} + +func TestOCIRepository_objectLevelWorkloadIdentityFeatureGate(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-olwifg-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + err = testEnv.Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "test", + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Provider: "aws", + ServiceAccountName: "test", + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + resultobj := &sourcev1.OCIRepository{} + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + return conditions.IsStalled(resultobj) + }).Should(BeTrue()) + + stalledCondition := conditions.Get(resultobj, meta.StalledCondition) + g.Expect(stalledCondition).ToNot(BeNil()) + g.Expect(stalledCondition.Reason).Should(Equal(meta.FeatureGateDisabledReason)) + g.Expect(stalledCondition.Message).Should(Equal("to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller")) + + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + + g.Eventually(func() bool { + if 
err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + resultobj.Annotations = map[string]string{ + meta.ReconcileRequestAnnotation: time.Now().Format(time.RFC3339), + } + return testEnv.Update(ctx, resultobj) == nil + }).Should(BeTrue()) + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + logOCIRepoStatus(t, resultobj) + return !conditions.IsReady(resultobj) && + conditions.GetReason(resultobj, meta.ReadyCondition) == sourcev1.AuthenticationFailedReason + }).Should(BeTrue()) +} + +func TestOCIRepository_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.OCIRepository, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + assertArtifact *meta.Artifact + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + + if n != len(revisions)-1 { + time.Sleep(time.Second) + } + } + + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/oci-reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/oci-reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/oci-reconcile-storage/d.txt", + "/oci-reconcile-storage/c.txt", + "!/oci-reconcile-storage/b.txt", + 
"!/oci-reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/oci-reconcile-storage/invalid.txt", + Revision: "e", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/oci-reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/oci-reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, 
meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/oci-reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/oci-reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/oci-reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: 
[]string{ + "/oci-reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/oci-reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/oci-reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(ctx, sp, obj, &meta.Artifact{}, "") + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + 
continue + } + + g.Expect(absoluteP).ToNot(BeAnExistingFile()) + } + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestOCIRepository_ReconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &OCIRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.OCIRepositoryStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestOCIRepositoryReconciler_notify(t *testing.T) { + + noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason") + noopErr.Ignore = true + + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.OCIRepository) + newObjBeforeFunc func(obj *sourcev1.OCIRepository) + commit git.Commit + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{ + Revision: "xxx", + Digest: "yyy", + Metadata: map[string]string{ + oci.SourceAnnotation: 
"https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: "6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872", + }, + } + }, + wantEvent: "Normal NewArtifact stored artifact with revision 'xxx' from 'oci://newurl.io', origin source 'https://github.com/stefanprodan/podinfo', origin revision '6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872'", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored artifact with revision 'xxx' from 'oci://newurl.io'", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored artifact with revision 'aaa' from 'oci://newurl.io'", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + 
obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + { + name: "no updates on requeue", + res: sreconcile.ResultRequeue, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.OCIRepository{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &OCIRepositoryReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + reconciler.notify(ctx, oldObj, newObj, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +type artifactFixture struct { + expectedPath string + expectedChecksum string +} + +type podinfoImage struct { + url string + tag string + digest gcrv1.Hash +} + +func createPodinfoImageFromTar(tarFileName, tag, registryURL string, opts ...crane.Option) (*podinfoImage, error) { + // Create Image + image, err := crane.Load(path.Join("testdata", "podinfo", tarFileName)) + if err != nil { + return nil, err + } + + image = 
setPodinfoImageAnnotations(image, tag) + + // url.Parse doesn't handle urls with no scheme well e.g localhost: + if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { + registryURL = fmt.Sprintf("http://%s", registryURL) + } + + myURL, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) + + // Image digest + podinfoImageDigest, err := image.Digest() + if err != nil { + return nil, err + } + + // Push image + err = crane.Push(image, repositoryURL, opts...) + if err != nil { + return nil, err + } + + // Tag the image + err = crane.Tag(repositoryURL, tag, opts...) + if err != nil { + return nil, err + } + + return &podinfoImage{ + url: "oci://" + repositoryURL, + tag: tag, + digest: podinfoImageDigest, + }, nil +} + +func pushMultiplePodinfoImages(serverURL string, insecure bool, versions ...string) (map[string]podinfoImage, error) { + podinfoVersions := make(map[string]podinfoImage) + + var opts []crane.Option + // If the registry is insecure then instruct configure an insecure HTTP client, + // otherwise add the root CA certificate since the HTTPS server is self signed. + if insecure { + opts = append(opts, crane.Insecure) + } else { + transport := http.DefaultTransport.(*http.Transport) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + transport.TLSClientConfig = &tls.Config{ + RootCAs: pool, + } + opts = append(opts, crane.WithTransport(transport)) + } + for i := 0; i < len(versions); i++ { + pi, err := createPodinfoImageFromTar(fmt.Sprintf("podinfo-%s.tar", versions[i]), versions[i], serverURL, opts...) 
+ if err != nil { + return nil, err + } + + podinfoVersions[versions[i]] = *pi + + } + + return podinfoVersions, nil +} + +func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { + metadata := map[string]string{ + oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: fmt.Sprintf("%s@sha1:b3b00fe35424a45d373bf4c7214178bc36fd7872", tag), + } + return mutate.Annotations(img, metadata).(gcrv1.Image) +} + +func TestOCIContentConfigChanged(t *testing.T) { + tests := []struct { + name string + spec sourcev1.OCIRepositorySpec + status sourcev1.OCIRepositoryStatus + want bool + }{ + { + name: "same ignore, no layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + }, + want: false, + }, + { + name: "different ignore, no layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("mmm"), + }, + want: true, + }, + { + name: "same ignore, same layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: false, + }, + { + name: "same ignore, different layer selector operation", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerCopy, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + { + name: "same ignore, different 
layer selector mediatype", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "bar", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + { + name: "no ignore, same layer selector", + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: false, + }, + { + name: "no ignore, different layer selector", + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "bar", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + Spec: tt.spec, + Status: tt.status, + } + + g.Expect(ociContentConfigChanged(obj)).To(Equal(tt.want)) + }) + } +} diff --git a/controllers/source_predicate.go b/internal/controller/source_predicate.go similarity index 94% rename from controllers/source_predicate.go rename to internal/controller/source_predicate.go index 60786b87e..968f2def9 100644 --- a/controllers/source_predicate.go +++ b/internal/controller/source_predicate.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package controller import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) type SourceRevisionChangePredicate struct { diff --git a/controllers/suite_test.go b/internal/controller/suite_test.go similarity index 60% rename from controllers/suite_test.go rename to internal/controller/suite_test.go index b2956b58c..ad0365616 100644 --- a/controllers/suite_test.go +++ b/internal/controller/suite_test.go @@ -14,20 +14,31 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( "bytes" "context" + "crypto/tls" + "crypto/x509" "fmt" "io" - "io/ioutil" + "log" "math/rand" + "net" + "net/http" "os" "path/filepath" "testing" "time" + "github.com/distribution/distribution/v3/configuration" + dockerRegistry "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + "github.com/foxcpp/go-mockdns" + "github.com/phayes/freeport" + "github.com/sirupsen/logrus" "golang.org/x/crypto/bcrypt" "helm.sh/helm/v3/pkg/getter" helmreg "helm.sh/helm/v3/pkg/registry" @@ -35,23 +46,19 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/metrics" 
"github.com/fluxcd/pkg/runtime/testenv" "github.com/fluxcd/pkg/testserver" - "github.com/phayes/freeport" - - "github.com/distribution/distribution/v3/configuration" - dockerRegistry "github.com/distribution/distribution/v3/registry" - _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" - _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" - git2go "github.com/libgit2/git2go/v33" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/cache" - "github.com/fluxcd/source-controller/internal/features" - "github.com/fluxcd/source-controller/internal/helm/registry" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" // +kubebuilder:scaffold:imports ) @@ -75,8 +82,9 @@ const ( ) var ( + k8sClient client.Client testEnv *testenv.Environment - testStorage *Storage + testStorage *storage.Storage testServer *testserver.ArtifactServer testMetricsH controller.Metrics ctx = ctrl.SetupSignalHandler() @@ -96,9 +104,11 @@ var ( ) var ( - tlsPublicKey []byte - tlsPrivateKey []byte - tlsCA []byte + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte + clientPublicKey []byte + clientPrivateKey []byte ) var ( @@ -106,20 +116,18 @@ var ( testCache *cache.Cache ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - type registryClientTestServer struct { out io.Writer registryHost string workspaceDir string registryClient *helmreg.Client + dnsServer *mockdns.Server } type registryOptions struct { - withBasicAuth bool - withTLS bool + withBasicAuth bool + withTLS bool + withClientCertAuth bool } func setupRegistryServer(ctx context.Context, workspaceDir string, opts registryOptions) (*registryClientTestServer, error) { @@ -134,15 
+142,11 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry var out bytes.Buffer server.out = &out - // init test client - client, err := helmreg.NewClient( + // init test client options + clientOpts := []helmreg.ClientOption{ helmreg.ClientOptDebug(true), helmreg.ClientOptWriter(server.out), - ) - if err != nil { - return nil, fmt.Errorf("failed to create registry client: %s", err) } - server.registryClient = client config := &configuration.Configuration{} port, err := freeport.GetFreePort() @@ -150,11 +154,30 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry return nil, fmt.Errorf("failed to get free port: %s", err) } - server.registryHost = fmt.Sprintf("localhost:%d", port) - config.HTTP.Addr = fmt.Sprintf("127.0.0.1:%d", port) + // Change the registry host to a host which is not localhost and + // mock DNS to map example.com to 127.0.0.1. + // This is required because Docker enforces HTTP if the registry + // is hosted on localhost/127.0.0.1. + if opts.withTLS { + server.registryHost = fmt.Sprintf("example.com:%d", port) + // Disable DNS server logging as it is extremely chatty. 
+ dnsLog := log.Default() + dnsLog.SetOutput(io.Discard) + server.dnsServer, err = mockdns.NewServerWithLogger(map[string]mockdns.Zone{ + "example.com.": { + A: []string{"127.0.0.1"}, + }, + }, dnsLog, false) + if err != nil { + return nil, err + } + server.dnsServer.PatchNet(net.DefaultResolver) + } else { + server.registryHost = fmt.Sprintf("127.0.0.1:%d", port) + } + + config.HTTP.Addr = fmt.Sprintf(":%d", port) config.HTTP.DrainTimeout = time.Duration(10) * time.Second - config.Log.AccessLog.Disabled = true - config.Log.Level = "error" config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} if opts.withBasicAuth { @@ -165,8 +188,7 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry } htpasswdPath := filepath.Join(workspaceDir, testRegistryHtpasswdFileBasename) - err = ioutil.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testRegistryUsername, string(pwBytes))), 0644) - if err != nil { + if err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testRegistryUsername, string(pwBytes))), 0644); err != nil { return nil, fmt.Errorf("failed to create htpasswd file: %s", err) } @@ -182,29 +204,90 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry if opts.withTLS { config.HTTP.TLS.Certificate = "testdata/certs/server.pem" config.HTTP.TLS.Key = "testdata/certs/server-key.pem" + // Configure CA certificates only if client cert authentication is enabled. 
+ if opts.withClientCertAuth { + config.HTTP.TLS.ClientCAs = []string{"testdata/certs/ca.pem"} + } + + // add TLS configured HTTP client option to clientOpts + httpClient, err := tlsConfiguredHTTPCLient() + if err != nil { + return nil, fmt.Errorf("failed to create TLS configured HTTP client: %s", err) + } + clientOpts = append(clientOpts, helmreg.ClientOptHTTPClient(httpClient)) + } else { + clientOpts = append(clientOpts, helmreg.ClientOptPlainHTTP()) } - dockerRegistry, err := dockerRegistry.NewRegistry(ctx, config) + // setup logger options + config.Log.AccessLog.Disabled = true + config.Log.Level = "error" + logrus.SetOutput(io.Discard) + + registry, err := dockerRegistry.NewRegistry(ctx, config) if err != nil { return nil, fmt.Errorf("failed to create docker registry: %w", err) } + // init test client + helmClient, err := helmreg.NewClient(clientOpts...) + if err != nil { + return nil, fmt.Errorf("failed to create registry client: %s", err) + } + server.registryClient = helmClient + // Start Docker registry - go dockerRegistry.ListenAndServe() + go registry.ListenAndServe() return server, nil } -func TestMain(m *testing.M) { - mustHaveNoThreadSupport() +func tlsConfiguredHTTPCLient() (*http.Client, error) { + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(tlsCA) { + return nil, fmt.Errorf("failed to append CA certificate to pool") + } + cert, err := tls.LoadX509KeyPair("testdata/certs/server.pem", "testdata/certs/server-key.pem") + if err != nil { + return nil, fmt.Errorf("failed to load server certificate: %s", err) + } + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + Certificates: []tls.Certificate{ + cert, + }, + }, + }, + } + return httpClient, nil +} +func (r *registryClientTestServer) Close() { + if r.dnsServer != nil { + mockdns.UnpatchNet(net.DefaultResolver) + r.dnsServer.Close() + } +} + +func TestMain(m *testing.M) { initTestTLS() 
utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) - testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) + testEnv = testenv.New( + testenv.WithCRDPath(filepath.Join("..", "..", "config", "crd", "bases")), + testenv.WithMaxConcurrentReconciles(4), + ) var err error + // Initialize a cacheless client for tests that need the latest objects. + k8sClient, err = client.New(testEnv.Config, client.Options{Scheme: scheme.Scheme}) + if err != nil { + panic(fmt.Sprintf("failed to create k8s client: %v", err)) + } + testServer, err = testserver.NewTempArtifactServer() if err != nil { panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) @@ -217,7 +300,7 @@ func TestMain(m *testing.M) { panic(fmt.Sprintf("Failed to create a test storage: %v", err)) } - testMetricsH = controller.MustMakeMetrics(testEnv) + testMetricsH = controller.NewMetrics(testEnv, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) testWorkspaceDir, err := os.MkdirTemp("", "registry-test-") if err != nil { @@ -229,19 +312,16 @@ func TestMain(m *testing.M) { if err != nil { panic(fmt.Sprintf("Failed to create a test registry server: %v", err)) } - - if err = managed.InitManagedTransport(); err != nil { - panic(fmt.Sprintf("Failed to initialize libgit2 managed transport: %v", err)) - } + defer testRegistryServer.Close() if err := (&GitRepositoryReconciler{ - Client: testEnv, - EventRecorder: record.NewFakeRecorder(32), - Metrics: testMetricsH, - Storage: testStorage, - features: features.FeatureGates(), - Libgit2TransportInitialized: managed.Enabled, - }).SetupWithManager(testEnv); err != nil { + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManagerAndOptions(testEnv, GitRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) } @@ -250,7 +330,9 @@ func 
TestMain(m *testing.M) { EventRecorder: record.NewFakeRecorder(32), Metrics: testMetricsH, Storage: testStorage, - }).SetupWithManager(testEnv); err != nil { + }).SetupWithManagerAndOptions(testEnv, BucketReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) } @@ -262,7 +344,9 @@ func TestMain(m *testing.M) { EventRecorder: record.NewFakeRecorder(32), Metrics: testMetricsH, Storage: testStorage, - }).SetupWithManager(testEnv); err != nil { + }).SetupWithManagerAndOptions(testEnv, OCIRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { panic(fmt.Sprintf("Failed to start OCIRepositoryReconciler: %v", err)) } @@ -275,20 +359,12 @@ func TestMain(m *testing.M) { Cache: testCache, TTL: 1 * time.Second, CacheRecorder: cacheRecorder, - }).SetupWithManager(testEnv); err != nil { + }).SetupWithManagerAndOptions(testEnv, HelmRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) } - if err = (&HelmRepositoryOCIReconciler{ - Client: testEnv, - EventRecorder: record.NewFakeRecorder(32), - Metrics: testMetricsH, - Getters: testGetters, - RegistryClientGenerator: registry.ClientGenerator, - }).SetupWithManager(testEnv); err != nil { - panic(fmt.Sprintf("Failed to start HelmRepositoryOCIReconciler: %v", err)) - } - if err := (&HelmChartReconciler{ Client: testEnv, EventRecorder: record.NewFakeRecorder(32), @@ -298,8 +374,10 @@ func TestMain(m *testing.M) { Cache: testCache, TTL: 1 * time.Second, CacheRecorder: cacheRecorder, - }).SetupWithManager(testEnv); err != nil { - panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) + }).SetupWithManagerAndOptions(ctx, testEnv, HelmChartReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start 
HelmChartReconciler: %v", err)) } go func() { @@ -344,14 +422,30 @@ func initTestTLS() { if err != nil { panic(err) } + clientPrivateKey, err = os.ReadFile("testdata/certs/client-key.pem") + if err != nil { + panic(err) + } + clientPublicKey, err = os.ReadFile("testdata/certs/client.pem") + if err != nil { + panic(err) + } } -func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { - storage, err := NewStorage(s.Root(), s.URL(), retentionTTL, retentionRecords) +func newTestStorage(s *testserver.HTTPServer) (*storage.Storage, error) { + opts := &config.Options{ + StoragePath: s.Root(), + StorageAddress: s.URL(), + StorageAdvAddress: s.URL(), + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) if err != nil { return nil, err } - return storage, nil + return st, nil } var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") @@ -368,21 +462,7 @@ func int64p(i int64) *int64 { return &i } -// This provides a regression assurance for image-automation-controller/#339. -// Validates that: -// - libgit2 was built with no support for threads. -// - git2go accepts libgit2 built with no support for threads. -// -// The logic below does the validation of the former, whilst -// referring to git2go forces its init() execution, which is -// where any validation to that effect resides. -// -// git2go does not support threadless libgit2 by default, -// hence a fork is being used which disables such validation. -// -// TODO: extract logic into pkg. 
-func mustHaveNoThreadSupport() { - if git2go.Features()&git2go.FeatureThreads != 0 { - panic("libgit2 must not be build with thread support") - } +func logOCIRepoStatus(t *testing.T, obj *sourcev1.OCIRepository) { + sts, _ := yaml.Marshal(obj.Status) + t.Log(string(sts)) } diff --git a/controllers/testdata/certs/Makefile b/internal/controller/testdata/certs/Makefile similarity index 76% rename from controllers/testdata/certs/Makefile rename to internal/controller/testdata/certs/Makefile index dca2408c3..22b40466b 100644 --- a/controllers/testdata/certs/Makefile +++ b/internal/controller/testdata/certs/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -all: server-key.pem +all: server-key.pem client-key.pem ca-key.pem: ca-csr.json cfssl gencert -initca ca-csr.json | cfssljson -bare ca – @@ -28,3 +28,13 @@ server-key.pem: server-csr.json ca-config.json ca-key.pem server-csr.json | cfssljson -bare server sever.pem: server-key.pem server.csr: server-key.pem + +client-key.pem: client-csr.json ca-config.json ca-key.pem + cfssl gencert \ + -ca=ca.pem \ + -ca-key=ca-key.pem \ + -config=ca-config.json \ + -profile=web-servers \ + client-csr.json | cfssljson -bare client +client.pem: client-key.pem +client.csr: client-key.pem diff --git a/controllers/testdata/certs/ca-config.json b/internal/controller/testdata/certs/ca-config.json similarity index 100% rename from controllers/testdata/certs/ca-config.json rename to internal/controller/testdata/certs/ca-config.json diff --git a/controllers/testdata/certs/ca-csr.json b/internal/controller/testdata/certs/ca-csr.json similarity index 100% rename from controllers/testdata/certs/ca-csr.json rename to internal/controller/testdata/certs/ca-csr.json diff --git a/internal/controller/testdata/certs/ca-key.pem b/internal/controller/testdata/certs/ca-key.pem new file mode 100644 index 000000000..5f78af275 --- /dev/null +++ 
b/internal/controller/testdata/certs/ca-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEICJFvVFVBSL0EteniBRfI9M1tm9Vmh9CKv7dhvZSqtV6oAoGCCqGSM49 +AwEHoUQDQgAE+EGQ9wZw/XIbyCwu7wvbzoGhpE2KtZwSUXboPEAgacfaqfgdT92D +If9qYie8umbgUymQnnqN8fRnT/wqqdBLDg== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/ca.csr b/internal/controller/testdata/certs/ca.csr new file mode 100644 index 000000000..ed5490ce2 --- /dev/null +++ b/internal/controller/testdata/certs/ca.csr @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBHzCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6gSzBJBgkqhkiG9w0BCQ4x +PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt +cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiEA1PxOWSIrmLb5IeejHvfx +AkjpamR/GTLhSzXlGv1hCmsCIDSeZL2OF5R5k2v4giXiB6GUfmawykGkO2fIG1kq +5l5V +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/ca.pem b/internal/controller/testdata/certs/ca.pem new file mode 100644 index 000000000..72644519d --- /dev/null +++ b/internal/controller/testdata/certs/ca.pem @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE----- +MIIBiDCCAS2gAwIBAgIUCRPU/Fa1nIWlk7TUejHGI+WKJFAwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzAw +NDIxMDcwNTAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6jUzBRMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS+cS2gBCfSCltLUMNY0kG2 +mj9zEDAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0kAMEYCIQC33kO/m+ab +i/2dlkg7hab4jCkFkxV3fWiOP0lbrLIMYQIhAPOcHeXmGE32apXKoZ6IfGJdMtz1 +3bkHYeqNs2qtpQ/5 +-----END CERTIFICATE----- diff --git a/controllers/testdata/certs/server-csr.json b/internal/controller/testdata/certs/client-csr.json similarity index 100% rename from 
controllers/testdata/certs/server-csr.json rename to internal/controller/testdata/certs/client-csr.json diff --git a/internal/controller/testdata/certs/client-key.pem b/internal/controller/testdata/certs/client-key.pem new file mode 100644 index 000000000..f55b40b4d --- /dev/null +++ b/internal/controller/testdata/certs/client-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIFVLYwGEhqLW/WYnsA9om6cSxcgVsKnwIWXc34DF7LpwoAoGCCqGSM49 +AwEHoUQDQgAE5H76We32W5cQq8DRJT+pteyh53GUBiI5IbM+qVWgsCIFJEaSJKgs +mv1H7c3NhP292Pgr6vdWJACLQHzmpsVpmg== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/client.csr b/internal/controller/testdata/certs/client.csr new file mode 100644 index 000000000..3699ea27b --- /dev/null +++ b/internal/controller/testdata/certs/client.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqgSzBJBgkqhkiG9w0BCQ4xPDA6 +MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiB0px2gw2ICFz26zAajtJyoNHl+ +inOXY5ohtzP4ag+NXQIgAbjIsOUuQ7JT31DdI6yCVfO014hHawtEsdV4rxTrQMA= +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/client.pem b/internal/controller/testdata/certs/client.pem new file mode 100644 index 000000000..9db876e59 --- /dev/null +++ b/internal/controller/testdata/certs/client.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB7DCCAZKgAwIBAgIUPH5zyEsXoFMCMkZaM2s6YtnoQcgwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqjgbowgbcwDgYDVR0PAQH/BAQD +AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA 
+MB0GA1UdDgQWBBTqud4vpysQdb1/5K3RoDXvBdQGgzAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIhAM0u +Eo6u3BDtw5bezhLa/THDy4QT63ktpAff9i/QJOErAiAifOvW7n5ZTLjjSnJ+dCtr +Avtupcg1WLyryhliqtNKhg== +-----END CERTIFICATE----- diff --git a/pkg/git/strategy/testdata/certs/server-csr.json b/internal/controller/testdata/certs/server-csr.json similarity index 100% rename from pkg/git/strategy/testdata/certs/server-csr.json rename to internal/controller/testdata/certs/server-csr.json diff --git a/internal/controller/testdata/certs/server-key.pem b/internal/controller/testdata/certs/server-key.pem new file mode 100644 index 000000000..64d7da136 --- /dev/null +++ b/internal/controller/testdata/certs/server-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIH19RQir/x9wHNAvHITu7/3Y4ckQ3GsNyEGYF3/nalheoAoGCCqGSM49 +AwEHoUQDQgAEvqlooNIpRmCjv9yBzjqoyXZvcU8zo9npYm3HPX7TReYetrkkJh/P +6a5NDJhnWemcj9iZdm2kGTE7MCgGi4mRog== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/server.csr b/internal/controller/testdata/certs/server.csr new file mode 100644 index 000000000..b0fce1781 --- /dev/null +++ b/internal/controller/testdata/certs/server.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKgSzBJBgkqhkiG9w0BCQ4xPDA6 +MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiAJbvDLjrCkTRvTjrv2wXLN9Hgu +p6SrTQJUWlIj3S8DggIgJraxPvnwfeKE5dM7ZgJXADHy838h04dQ+Za7hS899V8= +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/server.pem b/internal/controller/testdata/certs/server.pem new file mode 100644 index 000000000..f3345e3b2 --- /dev/null +++ b/internal/controller/testdata/certs/server.pem @@ -0,0 +1,13 @@ 
+-----BEGIN CERTIFICATE----- +MIIB6zCCAZKgAwIBAgIUSGuttQSdoyWQzeZ6GkiKORYYUvQwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKjgbowgbcwDgYDVR0PAQH/BAQD +AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MB0GA1UdDgQWBBSNrNAk9jWUcFjxjAKzuDwsBrG1NDAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDRwAwRAIgIcrb +xGgcRsmP/R6Qo+Xe/w1UvNDaWJfsWO+hq1DtOQgCIEyGi3ClowsGnNpo734ArWbG +taem7qVKZJmCWRM6DFuT +-----END CERTIFICATE----- diff --git a/controllers/testdata/charts/helmchart-0.1.0.tgz b/internal/controller/testdata/charts/helmchart-0.1.0.tgz similarity index 100% rename from controllers/testdata/charts/helmchart-0.1.0.tgz rename to internal/controller/testdata/charts/helmchart-0.1.0.tgz diff --git a/controllers/testdata/charts/helmchart/.helmignore b/internal/controller/testdata/charts/helmchart/.helmignore similarity index 100% rename from controllers/testdata/charts/helmchart/.helmignore rename to internal/controller/testdata/charts/helmchart/.helmignore diff --git a/controllers/testdata/charts/helmchart/Chart.yaml b/internal/controller/testdata/charts/helmchart/Chart.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/Chart.yaml rename to internal/controller/testdata/charts/helmchart/Chart.yaml diff --git a/controllers/testdata/charts/helmchart/duplicate.yaml b/internal/controller/testdata/charts/helmchart/duplicate.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/duplicate.yaml rename to internal/controller/testdata/charts/helmchart/duplicate.yaml diff --git a/controllers/testdata/charts/helmchart/override.yaml b/internal/controller/testdata/charts/helmchart/override.yaml similarity index 100% rename from 
controllers/testdata/charts/helmchart/override.yaml rename to internal/controller/testdata/charts/helmchart/override.yaml diff --git a/controllers/testdata/charts/helmchart/templates/NOTES.txt b/internal/controller/testdata/charts/helmchart/templates/NOTES.txt similarity index 100% rename from controllers/testdata/charts/helmchart/templates/NOTES.txt rename to internal/controller/testdata/charts/helmchart/templates/NOTES.txt diff --git a/controllers/testdata/charts/helmchart/templates/_helpers.tpl b/internal/controller/testdata/charts/helmchart/templates/_helpers.tpl similarity index 100% rename from controllers/testdata/charts/helmchart/templates/_helpers.tpl rename to internal/controller/testdata/charts/helmchart/templates/_helpers.tpl diff --git a/controllers/testdata/charts/helmchart/templates/deployment.yaml b/internal/controller/testdata/charts/helmchart/templates/deployment.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/deployment.yaml rename to internal/controller/testdata/charts/helmchart/templates/deployment.yaml diff --git a/controllers/testdata/charts/helmchart/templates/ingress.yaml b/internal/controller/testdata/charts/helmchart/templates/ingress.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/ingress.yaml rename to internal/controller/testdata/charts/helmchart/templates/ingress.yaml diff --git a/controllers/testdata/charts/helmchart/templates/service.yaml b/internal/controller/testdata/charts/helmchart/templates/service.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/service.yaml rename to internal/controller/testdata/charts/helmchart/templates/service.yaml diff --git a/controllers/testdata/charts/helmchart/templates/serviceaccount.yaml b/internal/controller/testdata/charts/helmchart/templates/serviceaccount.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/serviceaccount.yaml rename to 
internal/controller/testdata/charts/helmchart/templates/serviceaccount.yaml diff --git a/controllers/testdata/charts/helmchart/templates/tests/test-connection.yaml b/internal/controller/testdata/charts/helmchart/templates/tests/test-connection.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/tests/test-connection.yaml rename to internal/controller/testdata/charts/helmchart/templates/tests/test-connection.yaml diff --git a/controllers/testdata/charts/helmchart/values.yaml b/internal/controller/testdata/charts/helmchart/values.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/values.yaml rename to internal/controller/testdata/charts/helmchart/values.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/.helmignore b/internal/controller/testdata/charts/helmchartwithdeps/.helmignore similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/.helmignore rename to internal/controller/testdata/charts/helmchartwithdeps/.helmignore diff --git a/controllers/testdata/charts/helmchartwithdeps/Chart.yaml b/internal/controller/testdata/charts/helmchartwithdeps/Chart.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/Chart.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/Chart.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/override.yaml b/internal/controller/testdata/charts/helmchartwithdeps/override.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/override.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/override.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/NOTES.txt b/internal/controller/testdata/charts/helmchartwithdeps/templates/NOTES.txt similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/NOTES.txt rename to internal/controller/testdata/charts/helmchartwithdeps/templates/NOTES.txt diff 
--git a/controllers/testdata/charts/helmchartwithdeps/templates/_helpers.tpl b/internal/controller/testdata/charts/helmchartwithdeps/templates/_helpers.tpl similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/_helpers.tpl rename to internal/controller/testdata/charts/helmchartwithdeps/templates/_helpers.tpl diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/deployment.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/deployment.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/deployment.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/deployment.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/ingress.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/ingress.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/ingress.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/ingress.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/service.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/service.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/service.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/service.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml similarity index 
100% rename from controllers/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/values.yaml b/internal/controller/testdata/charts/helmchartwithdeps/values.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/values.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/values.yaml diff --git a/controllers/testdata/git/repository/.sourceignore b/internal/controller/testdata/git/repository/.sourceignore similarity index 100% rename from controllers/testdata/git/repository/.sourceignore rename to internal/controller/testdata/git/repository/.sourceignore diff --git a/controllers/testdata/git/repository/foo.txt b/internal/controller/testdata/git/repository/foo.txt similarity index 100% rename from controllers/testdata/git/repository/foo.txt rename to internal/controller/testdata/git/repository/foo.txt diff --git a/controllers/testdata/git/repository/manifest.yaml b/internal/controller/testdata/git/repository/manifest.yaml similarity index 100% rename from controllers/testdata/git/repository/manifest.yaml rename to internal/controller/testdata/git/repository/manifest.yaml diff --git a/controllers/testdata/git/repowithsubdirs/.sourceignore b/internal/controller/testdata/git/repowithsubdirs/.sourceignore similarity index 100% rename from controllers/testdata/git/repowithsubdirs/.sourceignore rename to internal/controller/testdata/git/repowithsubdirs/.sourceignore diff --git a/controllers/testdata/git/repowithsubdirs/apps/manifest.yaml b/internal/controller/testdata/git/repowithsubdirs/apps/manifest.yaml similarity index 100% rename from controllers/testdata/git/repowithsubdirs/apps/manifest.yaml rename to internal/controller/testdata/git/repowithsubdirs/apps/manifest.yaml diff --git a/controllers/testdata/git/repowithsubdirs/clusters/manifest.yaml 
b/internal/controller/testdata/git/repowithsubdirs/clusters/manifest.yaml similarity index 100% rename from controllers/testdata/git/repowithsubdirs/clusters/manifest.yaml rename to internal/controller/testdata/git/repowithsubdirs/clusters/manifest.yaml diff --git a/controllers/testdata/git/repowithsubdirs/foo.txt b/internal/controller/testdata/git/repowithsubdirs/foo.txt similarity index 100% rename from controllers/testdata/git/repowithsubdirs/foo.txt rename to internal/controller/testdata/git/repowithsubdirs/foo.txt diff --git a/controllers/testdata/oci/repository/foo.txt b/internal/controller/testdata/oci/repository/foo.txt similarity index 100% rename from controllers/testdata/oci/repository/foo.txt rename to internal/controller/testdata/oci/repository/foo.txt diff --git a/controllers/testdata/podinfo/podinfo-6.1.4.tar b/internal/controller/testdata/podinfo/podinfo-6.1.4.tar similarity index 100% rename from controllers/testdata/podinfo/podinfo-6.1.4.tar rename to internal/controller/testdata/podinfo/podinfo-6.1.4.tar diff --git a/controllers/testdata/podinfo/podinfo-6.1.5.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar similarity index 100% rename from controllers/testdata/podinfo/podinfo-6.1.5.tar rename to internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5.tar differ diff --git a/controllers/testdata/podinfo/podinfo-6.1.6.tar b/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar similarity index 
100% rename from controllers/testdata/podinfo/podinfo-6.1.6.tar rename to internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.6.tar b/internal/controller/testdata/podinfo/podinfo-6.1.6.tar new file mode 100644 index 000000000..09616c2df Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.6.tar differ diff --git a/internal/error/error.go b/internal/error/error.go index 0852ba412..cb3a8cd78 100644 --- a/internal/error/error.go +++ b/internal/error/error.go @@ -90,28 +90,6 @@ func NewStalling(err error, reason string) *Stalling { } } -// Event is an error event. It can be used to construct an event to be -// recorded. -// Deprecated: use Generic error with NewGeneric() for the same behavior and -// replace the RecordContextualError with ErrorActionHandler for result -// processing. -type Event struct { - // Reason is the reason for the event error. - Reason string - // Error is the actual error for the event. - Err error -} - -// Error implements error interface. -func (ee *Event) Error() string { - return ee.Err.Error() -} - -// Unwrap returns the underlying error. -func (ee *Event) Unwrap() error { - return ee.Err -} - // Waiting is the reconciliation wait state error. It contains an error, wait // duration and a reason for the wait. It is a contextual error, used to express // the scenario which contributed to the reconciliation result. @@ -176,13 +154,13 @@ func (g *Generic) Unwrap() error { // NewGeneric constructs a new Generic error with default configuration. func NewGeneric(err error, reason string) *Generic { - // Since it's a error, ensure to log and send failure notification. + // Since it's a generic error, it'll be returned to the runtime and logged + // automatically, do not log it. Send failure notification. 
return &Generic{ Reason: reason, Err: err, Config: Config{ Event: corev1.EventTypeWarning, - Log: true, Notification: true, }, } diff --git a/internal/error/sanitized.go b/internal/error/sanitized.go new file mode 100644 index 000000000..04f6ccf92 --- /dev/null +++ b/internal/error/sanitized.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "fmt" + "net/url" + "regexp" +) + +type SanitizedError struct { + err string +} + +func (e SanitizedError) Error() string { + return e.err +} + +// SanitizeError extracts all URLs from the error message +// and replaces them with the URL without the query string. +func SanitizeError(err error) SanitizedError { + errorMessage := err.Error() + for _, u := range extractURLs(errorMessage) { + urlWithoutQueryString, err := removeQueryString(u) + if err == nil { + re, err := regexp.Compile(fmt.Sprintf("%s*", regexp.QuoteMeta(u))) + if err == nil { + errorMessage = re.ReplaceAllString(errorMessage, urlWithoutQueryString) + } + } + } + + return SanitizedError{errorMessage} +} + +// removeQueryString takes a URL string as input and returns the URL without the query string. +func removeQueryString(urlStr string) (string, error) { + // Parse the URL. + u, err := url.Parse(urlStr) + if err != nil { + return "", err + } + + // Rebuild the URL without the query string. 
+ u.RawQuery = "" + return u.String(), nil +} + +// extractURLs takes a log message as input and returns the URLs found. +func extractURLs(logMessage string) []string { + // Define a regular expression to match a URL. + // This is a simple pattern and might need to be adjusted depending on the log message format. + urlRegex := regexp.MustCompile(`https?://[^\s]+`) + + // Find the first match in the log message. + matches := urlRegex.FindAllString(logMessage, -1) + if len(matches) == 0 { + return []string{} + } + + return matches +} diff --git a/internal/error/sanitized_test.go b/internal/error/sanitized_test.go new file mode 100644 index 000000000..e9c6a858b --- /dev/null +++ b/internal/error/sanitized_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "errors" + "testing" + + . 
"github.com/onsi/gomega" +) + +func Test_extractURLs(t *testing.T) { + + tests := []struct { + name string + logMessage string + wantUrls []string + }{ + { + name: "Log Contains single URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\":"}, + }, + { + name: "Log Contains multiple URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no : dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{ + "https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es", + "https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no", + }, + }, + { + name: "Log Contains No URL", + logMessage: "Log message without URL", + wantUrls: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urls := extractURLs(tt.logMessage) + + g.Expect(len(urls)).To(Equal(len(tt.wantUrls))) + for i := range tt.wantUrls { + g.Expect(urls[i]).To(Equal(tt.wantUrls[i])) + } + }) + } +} + +func Test_removeQueryString(t *testing.T) { + + tests := []struct { + name string + urlStr string + wantUrl string + }{ + { + name: "URL with query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL without query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml", + 
wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL with query string and port", + urlStr: "https://blobstorage.blob.core.windows.net:443/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net:443/container/index.yaml", + }, + { + name: "Invalid URL", + urlStr: "NoUrl", + wantUrl: "NoUrl", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urlWithoutQueryString, err := removeQueryString(tt.urlStr) + + g.Expect(err).To(BeNil()) + g.Expect(urlWithoutQueryString).To(Equal(tt.wantUrl)) + }) + } +} + +func Test_SanitizeError(t *testing.T) { + + tests := []struct { + name string + errMessage string + wantErrMessage string + }{ + { + name: "Log message with URL with query string", + errMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + { + name: "Log message without URL", + errMessage: "Log message contains no URL", + wantErrMessage: "Log message contains no URL", + }, + + { + name: "Log message with multiple Urls", + errMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml https://blobstorage1.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + 
err := SanitizeError(errors.New(tt.errMessage)) + g.Expect(err.Error()).To(Equal(tt.wantErrMessage)) + }) + } +} diff --git a/internal/features/features.go b/internal/features/features.go index 0449cf41a..edb9beb17 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -19,26 +19,30 @@ limitations under the License. // states. package features -import feathelper "github.com/fluxcd/pkg/runtime/features" +import ( + "github.com/fluxcd/pkg/auth" + feathelper "github.com/fluxcd/pkg/runtime/features" +) const ( - // OptimizedGitClones decreases resource utilization for GitRepository - // reconciliations. It supports both go-git and libgit2 implementations - // when cloning repositories using branches or tags. + // CacheSecretsAndConfigMaps controls whether secrets and configmaps should be cached. // - // When enabled, avoids full clone operations by first checking whether - // the last revision is still the same at the target repository, - // and if that is so, skips the reconciliation. - OptimizedGitClones = "OptimizedGitClones" + // When enabled, it will cache both object types, resulting in increased memory usage + // and cluster-wide RBAC permissions (list and watch). + CacheSecretsAndConfigMaps = "CacheSecretsAndConfigMaps" ) var features = map[string]bool{ - // OptimizedGitClones - // opt-out from v0.25 - OptimizedGitClones: true, + // CacheSecretsAndConfigMaps + // opt-in from v0.34 + CacheSecretsAndConfigMaps: false, +} + +func init() { + auth.SetFeatureGates(features) } -// DefaultFeatureGates contains a list of all supported feature gates and +// FeatureGates contains a list of all supported feature gates and // their default values. func FeatureGates() map[string]bool { return features diff --git a/internal/fs/LICENSE b/internal/fs/LICENSE deleted file mode 100644 index a2dd15faf..000000000 --- a/internal/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 The Go Authors. 
All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/fs/fs.go b/internal/fs/fs.go deleted file mode 100644 index 21cf96e69..000000000 --- a/internal/fs/fs.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package fs - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "syscall" -) - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-device link error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func RenameWithFallback(src, dst string) error { - _, err := os.Stat(src) - if err != nil { - return fmt.Errorf("cannot stat %s: %w", src, err) - } - - err = os.Rename(src, dst) - if err == nil { - return nil - } - - return renameFallback(err, src, dst) -} - -// renameByCopy attempts to rename a file or directory by copying it to the -// destination and then removing the src thus emulating the rename behavior. -func renameByCopy(src, dst string) error { - var cerr error - if dir, _ := IsDir(src); dir { - cerr = CopyDir(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying directory failed: %w", cerr) - } - } else { - cerr = copyFile(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying file failed: %w", cerr) - } - } - - if cerr != nil { - return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr) - } - - if err := os.RemoveAll(src); err != nil { - return fmt.Errorf("cannot delete %s: %w", src, err) - } - - return nil -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDstExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -func CopyDir(src, dst string) error { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - // We use os.Lstat() here to ensure we don't fall in a loop where a symlink - // actually links to a one of its parent directories. 
- fi, err := os.Lstat(src) - if err != nil { - return err - } - if !fi.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - return errDstExist - } - - if err = os.MkdirAll(dst, fi.Mode()); err != nil { - return fmt.Errorf("cannot mkdir %s: %w", dst, err) - } - - entries, err := os.ReadDir(src) - if err != nil { - return fmt.Errorf("cannot read directory %s: %w", dst, err) - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return fmt.Errorf("copying directory failed: %w", err) - } - } else { - // This will include symlinks, which is what we want when - // copying things. - if err = copyFile(srcPath, dstPath); err != nil { - return fmt.Errorf("copying file failed: %w", err) - } - } - } - - return nil -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. The file mode will be copied from the source. -func copyFile(src, dst string) (err error) { - if sym, err := IsSymlink(src); err != nil { - return fmt.Errorf("symlink check failed: %w", err) - } else if sym { - if err := cloneSymlink(src, dst); err != nil { - if runtime.GOOS == "windows" { - // If cloning the symlink fails on Windows because the user - // does not have the required privileges, ignore the error and - // fall back to copying the file contents. 
- // - // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { - return err - } - } else { - return err - } - } else { - return nil - } - } - - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - if _, err = io.Copy(out, in); err != nil { - out.Close() - return - } - - // Check for write errors on Close - if err = out.Close(); err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - - // Temporary fix for Go < 1.9 - // - // See: https://github.com/golang/dep/issues/774 - // and https://github.com/golang/go/issues/20829 - if runtime.GOOS == "windows" { - dst = fixLongPath(dst) - } - err = os.Chmod(dst, si.Mode()) - - return -} - -// cloneSymlink will create a new symlink that points to the resolved path of sl. -// If sl is a relative symlink, dst will also be a relative symlink. -func cloneSymlink(sl, dst string) error { - resolved, err := os.Readlink(sl) - if err != nil { - return err - } - - return os.Symlink(resolved, dst) -} - -// IsDir determines is the path given is a directory or not. -func IsDir(name string) (bool, error) { - fi, err := os.Stat(name) - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", name) - } - return true, nil -} - -// IsSymlink determines if the given path is a symbolic link. -func IsSymlink(path string) (bool, error) { - l, err := os.Lstat(path) - if err != nil { - return false, err - } - - return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. 
If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. (This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. 
- return path - } - if !isAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -func isAbs(path string) (b bool) { - v := volumeName(path) - if v == "" { - return false - } - path = path[len(v):] - if path == "" { - return false - } - return os.IsPathSeparator(path[0]) -} - -func volumeName(path string) (v string) { - if len(path) < 2 { - return "" - } - // with drive letter - c := path[0] - if path[1] == ':' && - ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z') { - return path[:2] - } - // is it UNC - if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && - !os.IsPathSeparator(path[2]) && path[2] != '.' { - // first, leading `\\` and next shouldn't be `\`. its server name. - for n := 3; n < l-1; n++ { - // second, next '\' shouldn't be repeated. - if os.IsPathSeparator(path[n]) { - n++ - // third, following something characters. its share name. - if !os.IsPathSeparator(path[n]) { - if path[n] == '.' 
{ - break - } - for ; n < l; n++ { - if os.IsPathSeparator(path[n]) { - break - } - } - return path[:n] - } - break - } - } - } - return "" -} diff --git a/internal/fs/fs_test.go b/internal/fs/fs_test.go deleted file mode 100644 index 9a1c5ef99..000000000 --- a/internal/fs/fs_test.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync" - "testing" -) - -var ( - mu sync.Mutex -) - -func TestRenameWithFallback(t *testing.T) { - dir := t.TempDir() - - if err := RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil { - t.Fatal("expected an error for non existing file, but got nil") - } - - srcpath := filepath.Join(dir, "src") - - if srcf, err := os.Create(srcpath); err != nil { - t.Fatal(err) - } else { - srcf.Close() - } - - if err := RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil { - t.Fatal(err) - } - - srcpath = filepath.Join(dir, "a") - if err := os.MkdirAll(srcpath, 0o770); err != nil { - t.Fatal(err) - } - - dstpath := filepath.Join(dir, "b") - if err := os.MkdirAll(dstpath, 0o770); err != nil { - t.Fatal(err) - } - - if err := RenameWithFallback(srcpath, dstpath); err == nil { - t.Fatal("expected an error if dst is an existing directory, but got nil") - } -} - -func TestCopyDir(t *testing.T) { - dir := t.TempDir() - - srcdir := filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - files := []struct { - path string - contents string - fi os.FileInfo - }{ - {path: "myfile", contents: "hello world"}, - {path: filepath.Join("subdir", "file"), contents: "subdir file"}, - } - - // Create structure indicated in 'files' - for i, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - if err := 
os.MkdirAll(dn, 0o750); err != nil { - t.Fatal(err) - } - - fh, err := os.Create(fn) - if err != nil { - t.Fatal(err) - } - - if _, err = fh.Write([]byte(file.contents)); err != nil { - t.Fatal(err) - } - fh.Close() - - files[i].fi, err = os.Stat(fn) - if err != nil { - t.Fatal(err) - } - } - - destdir := filepath.Join(dir, "dest") - if err := CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - // Compare copy against structure indicated in 'files' - for _, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - dirOK, err := IsDir(dn) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", dn) - } - - got, err := os.ReadFile(fn) - if err != nil { - t.Fatal(err) - } - - if file.contents != string(got) { - t.Fatalf("expected: %s, got: %s", file.contents, string(got)) - } - - gotinfo, err := os.Stat(fn) - if err != nil { - t.Fatal(err) - } - - if file.fi.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", - file.path, file.fi.Mode(), fn, gotinfo.Mode()) - } - } -} - -func TestCopyDirFail_SrcInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - setupInaccessibleDir(t, func(dir string) error { - srcdir = filepath.Join(dir, "src") - return os.MkdirAll(srcdir, 0o750) - }) - - dir := t.TempDir() - - dstdir = filepath.Join(dir, "dst") - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_DstInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dst") - return nil - }) - - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_SrcIsNotDir(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if _, err := os.Create(srcdir); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errSrcNotDir { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) - } - -} - -func TestCopyDirFail_DstExists(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - if err := os.MkdirAll(dstdir, 0o750); err != nil { - t.Fatal(err) - } - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errDstExist { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) - } -} - -func TestCopyDirFailOpen(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. os.Chmod(..., 0o222) below is not - // enough for the file to be readonly, and os.Chmod(..., - // 0000) returns an invalid argument error. Skipping - // this this until a compatible implementation is - // provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - srcfn := filepath.Join(srcdir, "file") - srcf, err := os.Create(srcfn) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - // setup source file so that it cannot be read - if err = os.Chmod(srcfn, 0o220); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyFile(t *testing.T) { - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - want := "hello world" - if _, err := srcf.Write([]byte(want)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err := copyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - got, err := os.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if want != string(got) { - t.Fatalf("expected: %s, got: %s", want, string(got)) - } - - wantinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - gotinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if wantinfo.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) - } -} - -func TestCopyFileSymlink(t *testing.T) { - dir := t.TempDir() - defer cleanUpDir(dir) - - testcases := map[string]string{ - filepath.Join("./testdata/symlinks/file-symlink"): filepath.Join(dir, "dst-file"), - filepath.Join("./testdata/symlinks/windows-file-symlink"): filepath.Join(dir, "windows-dst-file"), - filepath.Join("./testdata/symlinks/invalid-symlink"): filepath.Join(dir, "invalid-symlink"), - } - - for symlink, dst := range testcases { - t.Run(symlink, func(t *testing.T) { - var err error - if err = 
copyFile(symlink, dst); err != nil { - t.Fatalf("failed to copy symlink: %s", err) - } - - var want, got string - - if runtime.GOOS == "windows" { - // Creating symlinks on Windows require an additional permission - // regular users aren't granted usually. So we copy the file - // content as a fall back instead of creating a real symlink. - srcb, err := os.ReadFile(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - dstb, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("%+v", err) - } - - want = string(srcb) - got = string(dstb) - } else { - want, err = os.Readlink(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - - got, err = os.Readlink(dst) - if err != nil { - t.Fatalf("could not resolve symlink: %s", err) - } - } - - if want != got { - t.Fatalf("resolved path is incorrect. expected %s, got %s", want, got) - } - }) - } -} - -func TestCopyFileLongFilePath(t *testing.T) { - if runtime.GOOS != "windows" { - // We want to ensure the temporary fix actually fixes the issue with - // os.Chmod and long file paths. This is only applicable on Windows. - t.Skip("skipping on non-windows") - } - - dir := t.TempDir() - - // Create a directory with a long-enough path name to cause the bug in #774. 
- dirName := "" - for len(dir+string(os.PathSeparator)+dirName) <= 300 { - dirName += "directory" - } - - fullPath := filepath.Join(dir, dirName, string(os.PathSeparator)) - if err := os.MkdirAll(fullPath, 0o750); err != nil && !os.IsExist(err) { - t.Fatalf("%+v", fmt.Errorf("unable to create temp directory: %s", fullPath)) - } - - err := os.WriteFile(fullPath+"src", []byte(nil), 0o640) - if err != nil { - t.Fatalf("%+v", err) - } - - err = copyFile(fullPath+"src", fullPath+"dst") - if err != nil { - t.Fatalf("unexpected error while copying file: %v", err) - } -} - -// C:\Users\appveyor\AppData\Local\Temp\1\gotest639065787\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890 - -func TestCopyFileFail(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - var dstdir string - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dir") - return os.Mkdir(dstdir, 0o770) - }) - - fn := filepath.Join(dstdir, "file") - if err := copyFile(srcf.Name(), fn); err == nil { - t.Fatalf("expected error for %s, got none", fn) - } -} - -// setupInaccessibleDir creates a temporary location with a single -// directory in it, in such a way that that directory is not accessible -// after this function returns. -// -// op is called with the directory as argument, so that it can create -// files or other test artifacts. -// -// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal -// will be invoked. 
-func setupInaccessibleDir(t *testing.T, op func(dir string) error) { - dir, err := os.MkdirTemp("", "dep") - if err != nil { - t.Fatal(err) - } - - subdir := filepath.Join(dir, "dir") - - t.Cleanup(func() { - if err := os.Chmod(subdir, 0o770); err != nil { - t.Error(err) - } - }) - - if err := os.Mkdir(subdir, 0o770); err != nil { - t.Fatal(err) - } - - if err := op(subdir); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(subdir, 0o660); err != nil { - t.Fatal(err) - } -} - -func TestIsDir(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var dn string - - setupInaccessibleDir(t, func(dir string) error { - dn = filepath.Join(dir, "dir") - return os.Mkdir(dn, 0o770) - }) - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {true, false}, - filepath.Join(wd, "testdata"): {true, false}, - filepath.Join(wd, "main.go"): {false, true}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true}, - dn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the directory exists should be inaccessible. 
- delete(tests, dn) - } - - for f, want := range tests { - got, err := IsDir(f) - if err != nil && !want.err { - t.Fatalf("expected no error, got %v", err) - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - } -} - -func TestIsSymlink(t *testing.T) { - dir := t.TempDir() - - dirPath := filepath.Join(dir, "directory") - if err := os.MkdirAll(dirPath, 0o770); err != nil { - t.Fatal(err) - } - - filePath := filepath.Join(dir, "file") - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - dirSymlink := filepath.Join(dir, "dirSymlink") - fileSymlink := filepath.Join(dir, "fileSymlink") - - if err = os.Symlink(dirPath, dirSymlink); err != nil { - t.Fatal(err) - } - if err = os.Symlink(filePath, fileSymlink); err != nil { - t.Fatal(err) - } - - var ( - inaccessibleFile string - inaccessibleSymlink string - ) - - setupInaccessibleDir(t, func(dir string) error { - inaccessibleFile = filepath.Join(dir, "file") - if fh, err := os.Create(inaccessibleFile); err != nil { - return err - } else if err = fh.Close(); err != nil { - return err - } - - inaccessibleSymlink = filepath.Join(dir, "symlink") - return os.Symlink(inaccessibleFile, inaccessibleSymlink) - }) - - tests := map[string]struct{ expected, err bool }{ - dirPath: {false, false}, - filePath: {false, false}, - dirSymlink: {true, false}, - fileSymlink: {true, false}, - inaccessibleFile: {false, true}, - inaccessibleSymlink: {false, true}, - } - - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in Windows. Skipping - // these cases until a compatible implementation is provided. 
- delete(tests, inaccessibleFile) - delete(tests, inaccessibleSymlink) - } - - for path, want := range tests { - got, err := IsSymlink(path) - if err != nil { - if !want.err { - t.Errorf("expected no error, got %v", err) - } - } - - if got != want.expected { - t.Errorf("expected %t for %s, got %t", want.expected, path, got) - } - } -} - -func cleanUpDir(dir string) { - if runtime.GOOS == "windows" { - mu.Lock() - exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run() - mu.Unlock() - } - if dir != "" { - os.RemoveAll(dir) - } -} diff --git a/internal/fs/rename.go b/internal/fs/rename.go deleted file mode 100644 index bad1f4778..000000000 --- a/internal/fs/rename.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows -// +build !windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } else if terr.Err != syscall.EXDEV { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/rename_windows.go b/internal/fs/rename_windows.go deleted file mode 100644 index fa9a0b4d9..000000000 --- a/internal/fs/rename_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build windows -// +build windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - if terr.Err != syscall.EXDEV { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr != 0x11 { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/testdata/symlinks/dir-symlink b/internal/fs/testdata/symlinks/dir-symlink deleted file mode 120000 index 777ebd014..000000000 --- a/internal/fs/testdata/symlinks/dir-symlink +++ /dev/null @@ -1 +0,0 @@ -../../testdata \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/file-symlink b/internal/fs/testdata/symlinks/file-symlink deleted file mode 120000 index 4c52274de..000000000 --- a/internal/fs/testdata/symlinks/file-symlink +++ /dev/null @@ -1 +0,0 @@ -../test.file \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/invalid-symlink b/internal/fs/testdata/symlinks/invalid-symlink deleted file mode 120000 index 0edf4f301..000000000 --- a/internal/fs/testdata/symlinks/invalid-symlink +++ /dev/null @@ -1 +0,0 @@ -/non/existing/file \ No newline at end of file diff --git 
a/internal/fs/testdata/symlinks/windows-file-symlink b/internal/fs/testdata/symlinks/windows-file-symlink deleted file mode 120000 index af1d6c8f5..000000000 --- a/internal/fs/testdata/symlinks/windows-file-symlink +++ /dev/null @@ -1 +0,0 @@ -C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file \ No newline at end of file diff --git a/internal/fs/testdata/test.file b/internal/fs/testdata/test.file deleted file mode 100644 index e69de29bb..000000000 diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index 76dc517c7..6ac896e78 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -24,10 +24,11 @@ import ( "regexp" "strings" + sourcefs "github.com/fluxcd/pkg/oci" helmchart "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chartutil" - "github.com/fluxcd/source-controller/internal/fs" + "github.com/fluxcd/source-controller/internal/oci" ) // Reference holds information to locate a chart. @@ -81,9 +82,9 @@ func (r RemoteReference) Validate() error { if r.Name == "" { return fmt.Errorf("no name set for remote chart reference") } - name := regexp.MustCompile("^([-a-z0-9]+/?)+$") + name := regexp.MustCompile(`^([-a-z0-9]+/?\.?)+$`) if !name.MatchString(r.Name) { - return fmt.Errorf("invalid chart name '%s': a valid name must be lower case letters and numbers and MAY be separated with dashes (-) or slashes (/)", r.Name) + return fmt.Errorf("invalid chart name '%s': a valid name must be lower case letters and numbers and MAY be separated with dashes (-), slashes (/) or periods (.)", r.Name) } return nil } @@ -106,6 +107,12 @@ type BuildOptions struct { // ValuesFiles can be set to a list of relative paths, used to compose // and overwrite an alternative default "values.yaml" for the chart. ValuesFiles []string + // CachedChartValuesFiles is a list of relative paths that were used to + // build the cached chart. 
+ CachedChartValuesFiles []string + // IgnoreMissingValuesFiles controls whether to silently ignore missing + // values files rather than failing. + IgnoreMissingValuesFiles bool // CachedChart can be set to the absolute path of a chart stored on // the local filesystem, and is used for simple validation by metadata // comparisons. @@ -113,6 +120,8 @@ type BuildOptions struct { // Force can be set to force the build of the chart, for example // because the list of ValuesFiles has changed. Force bool + // Verifier can be set to the verification of the chart. + Verify bool } // GetValuesFiles returns BuildOptions.ValuesFiles, except if it equals @@ -144,6 +153,9 @@ type Build struct { // This can for example be false if ValuesFiles is empty and the chart // source was already packaged. Packaged bool + // VerifiedResult indicates the results of verifying the chart. + // If no verification was performed, this field should be VerificationResultIgnored. + VerifiedResult oci.VerificationResult } // Summary returns a human-readable summary of the Build. 
@@ -207,7 +219,7 @@ func packageToPath(chart *helmchart.Chart, out string) error { if err != nil { return fmt.Errorf("failed to package chart: %w", err) } - if err = fs.RenameWithFallback(p, out); err != nil { + if err = sourcefs.RenameWithFallback(p, out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index 0e0b20c28..44399a80a 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -121,6 +121,11 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. + result.ValuesFiles = opts.CachedChartValuesFiles + } result.Packaged = requiresPackaging return result, nil @@ -140,9 +145,12 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, } // Merge chart values, if instructed - var mergedValues map[string]interface{} + var ( + mergedValues map[string]interface{} + valuesFiles []string + ) if len(opts.GetValuesFiles()) > 0 { - if mergedValues, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles); err != nil { + if mergedValues, valuesFiles, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles, opts.IgnoreMissingValuesFiles); err != nil { return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } } @@ -163,7 +171,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if err != nil { return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } - result.ValuesFiles = opts.GetValuesFiles() + result.ValuesFiles = valuesFiles } // Ensure dependencies are fetched if building from a directory @@ -187,31 +195,42 @@ 
func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, } // mergeFileValues merges the given value file paths into a single "values.yaml" map. -// The provided (relative) paths may not traverse outside baseDir. It returns the merge -// result, or an error. -func mergeFileValues(baseDir string, paths []string) (map[string]interface{}, error) { +// The provided (relative) paths may not traverse outside baseDir. By default, a missing +// file is considered an error. If ignoreMissing is true, missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. +func mergeFileValues(baseDir string, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) for _, p := range paths { secureP, err := securejoin.SecureJoin(baseDir, p) if err != nil { - return nil, err + return nil, nil, err } - if f, err := os.Stat(secureP); err != nil || !f.Mode().IsRegular() { - return nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", + f, err := os.Stat(secureP) + switch { + case err != nil: + if ignoreMissing && os.IsNotExist(err) { + continue + } + fallthrough + case !f.Mode().IsRegular(): + return nil, nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", strings.TrimPrefix(secureP, baseDir), p) } b, err := os.ReadFile(secureP) if err != nil { - return nil, fmt.Errorf("could not read values from file '%s': %w", p, err) + return nil, nil, fmt.Errorf("could not read values from file '%s': %w", p, err) } values := make(map[string]interface{}) err = yaml.Unmarshal(b, &values) if err != nil { - return nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) } mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) } - return 
mergedValues, nil + return mergedValues, valuesFiles, nil } // copyFileToPath attempts to copy in to out. It returns an error if out already exists. diff --git a/internal/helm/chart/builder_local_test.go b/internal/helm/chart/builder_local_test.go index 626dc072e..4b26e1419 100644 --- a/internal/helm/chart/builder_local_test.go +++ b/internal/helm/chart/builder_local_test.go @@ -93,7 +93,7 @@ func TestLocalBuilder_Build(t *testing.T) { name: "invalid version metadata", reference: LocalReference{Path: "../testdata/charts/helmchart"}, buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -281,11 +281,13 @@ func TestLocalBuilder_Build_CachedChart(t *testing.T) { func Test_mergeFileValues(t *testing.T) { tests := []struct { - name string - files []*helmchart.File - paths []string - want map[string]interface{} - wantErr string + name string + files []*helmchart.File + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string }{ { name: "merges values from files", @@ -295,10 +297,11 @@ func Test_mergeFileValues(t *testing.T) { {Name: "c.yaml", Data: []byte("b: d")}, }, paths: []string{"a.yaml", "b.yaml", "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, }, { name: "illegal traverse", @@ -318,6 +321,25 @@ func Test_mergeFileValues(t *testing.T) { paths: []string{"a.yaml"}, wantErr: "no values file found at path '/a.yaml'", }, + { + name: "ignore missing files", + files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "all files missing", + paths: []string{"a.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{}, 
+ wantFiles: []string{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -329,16 +351,18 @@ func Test_mergeFileValues(t *testing.T) { g.Expect(os.WriteFile(filepath.Join(baseDir, f.Name), f.Data, 0o640)).To(Succeed()) } - got, err := mergeFileValues(baseDir, tt.paths) + gotValues, gotFiles, err := mergeFileValues(baseDir, tt.paths, tt.ignoreMissing) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeNil()) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) }) } } diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go index d15e24299..2cfdf81b4 100644 --- a/internal/helm/chart/builder_remote.go +++ b/internal/helm/chart/builder_remote.go @@ -30,11 +30,12 @@ import ( "helm.sh/helm/v3/pkg/repo" "sigs.k8s.io/yaml" + sourcefs "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/transform" - "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" "github.com/fluxcd/source-controller/internal/helm/repository" + "github.com/fluxcd/source-controller/internal/oci" ) type remoteChartBuilder struct { @@ -63,7 +64,7 @@ func NewRemoteBuilder(repository repository.Downloader) Builder { // After downloading the chart, it is only packaged if required due to BuildOptions // modifying the chart, otherwise the exact data as retrieved from the repository // is written to p, after validating it to be a chart. 
-func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, opts BuildOptions) (*Build, error) { +func (b *remoteChartBuilder) Build(ctx context.Context, ref Reference, p string, opts BuildOptions) (*Build, error) { remoteRef, ok := ref.(RemoteReference) if !ok { err := fmt.Errorf("expected remote chart reference") @@ -74,9 +75,9 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o return nil, &BuildError{Reason: ErrChartReference, Err: err} } - res, result, err := b.downloadFromRepository(b.remote, remoteRef, opts) + res, result, err := b.downloadFromRepository(ctx, b.remote, remoteRef, opts) if err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, err } if res == nil { return result, nil @@ -102,7 +103,7 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o } chart.Metadata.Version = result.Version - mergedValues, err := mergeChartValues(chart, opts.ValuesFiles) + mergedValues, valuesFiles, err := mergeChartValues(chart, opts.ValuesFiles, opts.IgnoreMissingValuesFiles) if err != nil { err = fmt.Errorf("failed to merge chart values: %w", err) return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} @@ -112,7 +113,7 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o if err != nil { return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } - result.ValuesFiles = opts.GetValuesFiles() + result.ValuesFiles = valuesFiles } // Package the chart with the custom values @@ -124,12 +125,30 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o return result, nil } -func (b *remoteChartBuilder) downloadFromRepository(remote repository.Downloader, remoteRef RemoteReference, opts BuildOptions) (*bytes.Buffer, *Build, error) { +func (b *remoteChartBuilder) downloadFromRepository(ctx context.Context, remote repository.Downloader, remoteRef RemoteReference, opts BuildOptions) (*bytes.Buffer, 
*Build, error) { // Get the current version for the RemoteReference cv, err := remote.GetChartVersion(remoteRef.Name, remoteRef.Version) if err != nil { + var reason BuildErrorReason + switch err.(type) { + case *repository.ErrReference: + reason = ErrChartReference + case *repository.ErrExternal: + reason = ErrChartPull + default: + reason = ErrUnknown + } err = fmt.Errorf("failed to get chart version for remote reference: %w", err) - return nil, nil, &BuildError{Reason: ErrChartReference, Err: err} + return nil, nil, &BuildError{Reason: reason, Err: err} + } + + verifiedResult := oci.VerificationResultIgnored + + // Verify the chart if necessary + if opts.Verify { + if verifiedResult, err = remote.VerifyChart(ctx, cv); err != nil { + return nil, nil, &BuildError{Reason: ErrChartVerification, Err: err} + } } result, shouldReturn, err := generateBuildResult(cv, opts) @@ -137,6 +156,8 @@ func (b *remoteChartBuilder) downloadFromRepository(remote repository.Downloader return nil, nil, err } + result.VerifiedResult = verifiedResult + if shouldReturn { return nil, result, nil } @@ -157,6 +178,7 @@ func generateBuildResult(cv *repo.ChartVersion, opts BuildOptions) (*Build, bool result := &Build{} result.Version = cv.Version result.Name = cv.Name + result.VerifiedResult = oci.VerificationResultIgnored // Set build specific metadata if instructed if opts.VersionMetadata != "" { @@ -181,6 +203,11 @@ func generateBuildResult(cv *repo.ChartVersion, opts BuildOptions) (*Build, bool if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. 
+ result.ValuesFiles = opts.CachedChartValuesFiles + } result.Packaged = requiresPackaging return result, true, nil } @@ -204,13 +231,18 @@ func setBuildMetaData(version, versionMetadata string) (*semver.Version, error) } // mergeChartValues merges the given chart.Chart Files paths into a single "values.yaml" map. -// It returns the merge result, or an error. -func mergeChartValues(chart *helmchart.Chart, paths []string) (map[string]interface{}, error) { +// By default, a missing file is considered an error. If ignoreMissing is set true, +// missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. +func mergeChartValues(chart *helmchart.Chart, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) for _, p := range paths { cfn := filepath.Clean(p) if cfn == chartutil.ValuesfileName { mergedValues = transform.MergeMaps(mergedValues, chart.Values) + valuesFiles = append(valuesFiles, p) continue } var b []byte @@ -221,15 +253,19 @@ func mergeChartValues(chart *helmchart.Chart, paths []string) (map[string]interf } } if b == nil { - return nil, fmt.Errorf("no values file found at path '%s'", p) + if ignoreMissing { + continue + } + return nil, nil, fmt.Errorf("no values file found at path '%s'", p) } values := make(map[string]interface{}) if err := yaml.Unmarshal(b, &values); err != nil { - return nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) } mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) } - return mergedValues, nil + return mergedValues, valuesFiles, nil } // validatePackageAndWriteToPath atomically writes the packaged chart from reader @@ -254,7 +290,7 @@ func validatePackageAndWriteToPath(reader io.Reader, out string) error { 
if err = meta.Validate(); err != nil { return fmt.Errorf("failed to validate metadata of written chart: %w", err) } - if err = fs.RenameWithFallback(tmpFile.Name(), out); err != nil { + if err = sourcefs.RenameWithFallback(tmpFile.Name(), out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git a/internal/helm/chart/builder_remote_test.go b/internal/helm/chart/builder_remote_test.go index c72f19d88..7994fa5ee 100644 --- a/internal/helm/chart/builder_remote_test.go +++ b/internal/helm/chart/builder_remote_test.go @@ -99,6 +99,7 @@ entries: - https://example.com/grafana.tgz description: string version: 6.17.4 + name: grafana `) mockGetter := &mockIndexChartGetter{ @@ -151,7 +152,7 @@ entries: reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -193,10 +194,9 @@ entries: targetPath := filepath.Join(tmpDir, "chart.tgz") if tt.repository != nil { - _, err := tt.repository.CacheIndex() - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(tt.repository.CacheIndex()).ToNot(HaveOccurred()) // Cleanup the cache index path. 
- defer os.Remove(tt.repository.CachePath) + defer os.Remove(tt.repository.Path) } b := NewRemoteBuilder(tt.repository) @@ -225,7 +225,7 @@ entries: } } -func TestRemoteBuilder_BuildFromOCIChatRepository(t *testing.T) { +func TestRemoteBuilder_BuildFromOCIChartRepository(t *testing.T) { g := NewWithT(t) chartGrafana, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") @@ -293,14 +293,14 @@ func TestRemoteBuilder_BuildFromOCIChatRepository(t *testing.T) { name: "chart version not in repository", reference: RemoteReference{Name: "grafana", Version: "1.1.1"}, repository: mockRepoWithoutChart(), - wantErr: "failed to download chart for remote reference", + wantErr: "failed to download chart for remote reference: failed to get", }, { name: "invalid version metadata", reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -411,10 +411,10 @@ entries: reference := RemoteReference{Name: "helmchart"} repository := mockRepo() - _, err = repository.CacheIndex() + err = repository.CacheIndex() g.Expect(err).ToNot(HaveOccurred()) // Cleanup the cache index path. 
- defer os.Remove(repository.CachePath) + defer os.Remove(repository.Path) b := NewRemoteBuilder(repository) @@ -444,11 +444,13 @@ entries: func Test_mergeChartValues(t *testing.T) { tests := []struct { - name string - chart *helmchart.Chart - paths []string - want map[string]interface{} - wantErr string + name string + chart *helmchart.Chart + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string }{ { name: "merges values", @@ -460,10 +462,11 @@ func Test_mergeChartValues(t *testing.T) { }, }, paths: []string{"a.yaml", "b.yaml", "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, }, { name: "uses chart values", @@ -476,10 +479,11 @@ func Test_mergeChartValues(t *testing.T) { }, }, paths: []string{chartutil.ValuesfileName, "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{chartutil.ValuesfileName, "c.yaml"}, }, { name: "unmarshal error", @@ -497,21 +501,59 @@ func Test_mergeChartValues(t *testing.T) { paths: []string{"a.yaml"}, wantErr: "no values file found at path 'a.yaml'", }, + { + name: "merges values ignoring file missing", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "merges values ignoring all missing", + chart: &helmchart.Chart{}, + paths: []string{"a.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{}, + wantFiles: []string{}, + }, + { + name: "uses chart values ignoring missing file", + chart: &helmchart.Chart{ + Values: map[string]interface{}{ + "a": "b", + }, + }, + paths: []string{chartutil.ValuesfileName, "c.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ 
+ "a": "b", + }, + wantFiles: []string{chartutil.ValuesfileName}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := mergeChartValues(tt.chart, tt.paths) + gotValues, gotFiles, err := mergeChartValues(tt.chart, tt.paths, tt.ignoreMissing) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeNil()) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) }) } } diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go index 0fac78cbe..d3fa55e38 100644 --- a/internal/helm/chart/builder_test.go +++ b/internal/helm/chart/builder_test.go @@ -17,8 +17,8 @@ limitations under the License. package chart import ( + "crypto/rand" "encoding/hex" - "math/rand" "os" "path/filepath" "testing" @@ -113,6 +113,15 @@ func TestRemoteReference_Validate(t *testing.T) { ref: RemoteReference{Name: "not//a/valid/chart"}, wantErr: "invalid chart name 'not//a/valid/chart'", }, + { + name: "ref with period in name", + ref: RemoteReference{Name: "valid.chart.name"}, + }, + { + name: "ref with double period in name", + ref: RemoteReference{Name: "../valid-chart-name"}, + wantErr: "invalid chart name '../valid-chart-name", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/helm/chart/dependency_manager.go b/internal/helm/chart/dependency_manager.go index 1fbe6328c..8a3f0ccfb 100644 --- a/internal/helm/chart/dependency_manager.go +++ b/internal/helm/chart/dependency_manager.go @@ -218,6 +218,10 @@ func (dm *DependencyManager) addLocalDependency(ref LocalReference, c *chartWith return err } + if dep.Alias != "" { + ch.Metadata.Name = dep.Alias + } + c.mu.Lock() c.AddDependency(ch) c.mu.Unlock() @@ -246,6 +250,10 @@ func (dm 
*DependencyManager) addRemoteDependency(chart *chartWithLock, dep *helm return fmt.Errorf("failed to load downloaded archive of version '%s': %w", ver.Version, err) } + if dep.Alias != "" { + ch.Metadata.Name = dep.Alias + } + chart.mu.Lock() chart.AddDependency(ch) chart.mu.Unlock() @@ -258,7 +266,10 @@ func (dm *DependencyManager) resolveRepository(url string) (repo repository.Down dm.mu.Lock() defer dm.mu.Unlock() - nUrl := repository.NormalizeURL(url) + nUrl, err := repository.NormalizeURL(url) + if err != nil { + return + } err = repository.ValidateDepURL(nUrl) if err != nil { return @@ -285,6 +296,9 @@ func (dm *DependencyManager) resolveRepository(url string) (repo repository.Down // It does not allow the dependency's path to be outside the scope of // LocalReference.WorkDir. func (dm *DependencyManager) secureLocalChartPath(ref LocalReference, dep *helmchart.Dependency) (string, error) { + if dep.Repository == "" { + return securejoin.SecureJoin(ref.WorkDir, filepath.Join(ref.Path, "charts", dep.Name)) + } localUrl, err := url.Parse(dep.Repository) if err != nil { return "", fmt.Errorf("failed to parse alleged local chart reference: %w", err) diff --git a/internal/helm/chart/dependency_manager_test.go b/internal/helm/chart/dependency_manager_test.go index d63e5f153..241959fbe 100644 --- a/internal/helm/chart/dependency_manager_test.go +++ b/internal/helm/chart/dependency_manager_test.go @@ -86,11 +86,6 @@ func TestDependencyManager_Clear(t *testing.T) { Index: repo.NewIndexFile(), RWMutex: &sync.RWMutex{}, }, - "cached cache path": &repository.ChartRepository{ - CachePath: "/invalid/path/resets", - Cached: true, - RWMutex: &sync.RWMutex{}, - }, "with credentials": ociRepoWithCreds, "without credentials": &repository.OCIChartRepository{}, "nil downloader": nil, @@ -103,8 +98,6 @@ func TestDependencyManager_Clear(t *testing.T) { switch v := v.(type) { case *repository.ChartRepository: g.Expect(v.Index).To(BeNil()) - g.Expect(v.CachePath).To(BeEmpty()) - 
g.Expect(v.Cached).To(BeFalse()) case *repository.OCIChartRepository: g.Expect(v.HasCredentials()).To(BeFalse()) } @@ -297,13 +290,15 @@ func TestDependencyManager_build(t *testing.T) { func TestDependencyManager_addLocalDependency(t *testing.T) { tests := []struct { - name string - dep *helmchart.Dependency - wantErr string - wantFunc func(g *WithT, c *helmchart.Chart) + name string + chartName string + dep *helmchart.Dependency + wantErr string + wantFunc func(g *WithT, c *helmchart.Chart) }{ { - name: "local dependency", + name: "local dependency", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -314,7 +309,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { }, }, { - name: "version not matching constraint", + name: "version not matching constraint", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: "0.2.0", @@ -323,7 +319,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "can't get a valid version for constraint '0.2.0'", }, { - name: "invalid local reference", + name: "invalid local reference", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -332,7 +329,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "no chart found at '/absolutely/invalid'", }, { - name: "invalid chart archive", + name: "invalid chart archive", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -341,7 +339,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "failed to load chart from '/empty.tgz'", }, { - name: "invalid constraint", + name: "invalid constraint", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: "invalid", @@ -349,6 +348,26 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { }, wantErr: "invalid version/constraint format 'invalid'", 
}, + { + name: "no repository", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + }, + wantFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + }, + { + name: "no repository invalid reference", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: "nonexistingchart", + Version: chartVersion, + }, + wantErr: "no chart found at '/helmchartwithdepsnorepo/charts/nonexistingchart'", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -360,7 +379,7 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { absWorkDir, err := filepath.Abs("../testdata/charts") g.Expect(err).ToNot(HaveOccurred()) - err = dm.addLocalDependency(LocalReference{WorkDir: absWorkDir, Path: "helmchartwithdeps"}, + err = dm.addLocalDependency(LocalReference{WorkDir: absWorkDir, Path: tt.chartName}, &chartWithLock{Chart: chart}, tt.dep) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) @@ -441,14 +460,14 @@ func TestDependencyManager_addRemoteDependency(t *testing.T) { name: "strategic load error", downloaders: map[string]repository.Downloader{ "https://example.com/": &repository.ChartRepository{ - CachePath: "/invalid/cache/path/foo", - RWMutex: &sync.RWMutex{}, + Client: &mockGetter{}, + RWMutex: &sync.RWMutex{}, }, }, dep: &helmchart.Dependency{ Repository: "https://example.com", }, - wantErr: "failed to strategically load index", + wantErr: "failed to load index", }, { name: "repository get error", @@ -851,6 +870,15 @@ func TestDependencyManager_secureLocalChartPath(t *testing.T) { }, wantErr: "not a local chart reference", }, + { + name: "local dependency with empty repository", + dep: &helmchart.Dependency{ + Name: "some-subchart", + }, + baseDir: "/tmp/workdir", + path: "/chart", + want: "/tmp/workdir/chart/charts/some-subchart", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git 
a/internal/helm/chart/errors.go b/internal/helm/chart/errors.go index 5b3a5bec0..7b1b7f3b0 100644 --- a/internal/helm/chart/errors.go +++ b/internal/helm/chart/errors.go @@ -53,8 +53,9 @@ func (e *BuildError) Error() string { // Is returns true if the Reason or Err equals target. // It can be used to programmatically place an arbitrary Err in the // context of the Builder: -// err := &BuildError{Reason: ErrChartPull, Err: errors.New("arbitrary transport error")} -// errors.Is(err, ErrChartPull) +// +// err := &BuildError{Reason: ErrChartPull, Err: errors.New("arbitrary transport error")} +// errors.Is(err, ErrChartPull) func (e *BuildError) Is(target error) bool { if e.Reason == target { return true @@ -83,5 +84,6 @@ var ( ErrValuesFilesMerge = BuildErrorReason{Reason: "ValuesFilesError", Summary: "values files merge error"} ErrDependencyBuild = BuildErrorReason{Reason: "DependencyBuildError", Summary: "dependency build error"} ErrChartPackage = BuildErrorReason{Reason: "ChartPackageError", Summary: "chart package error"} + ErrChartVerification = BuildErrorReason{Reason: "ChartVerificationError", Summary: "chart verification error"} ErrUnknown = BuildErrorReason{Reason: "Unknown", Summary: "unknown build error"} ) diff --git a/internal/helm/chart/secureloader/ignore/doc.go b/internal/helm/chart/secureloader/ignore/doc.go index 4ca25c989..16c9a79e8 100644 --- a/internal/helm/chart/secureloader/ignore/doc.go +++ b/internal/helm/chart/secureloader/ignore/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -/*Package ignore provides tools for writing ignore files (a la .gitignore). +/* +Package ignore provides tools for writing ignore files (a la .gitignore). This provides both an ignore parser and a file-aware processor. @@ -23,19 +24,19 @@ format for .gitignore files (https://git-scm.com/docs/gitignore). 
The formatting rules are as follows: - - Parsing is line-by-line - - Empty lines are ignored - - Lines the begin with # (comments) will be ignored - - Leading and trailing spaces are always ignored - - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) - - There is no support for multi-line patterns - - Shell glob patterns are supported. See Go's "path/filepath".Match - - If a pattern begins with a leading !, the match will be negated. - - If a pattern begins with a leading /, only paths relatively rooted will match. - - If the pattern ends with a trailing /, only directories will match - - If a pattern contains no slashes, file basenames are tested (not paths) - - The pattern sequence "**", while legal in a glob, will cause an error here - (to indicate incompatibility with .gitignore). + - Parsing is line-by-line + - Empty lines are ignored + - Lines the begin with # (comments) will be ignored + - Leading and trailing spaces are always ignored + - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) + - There is no support for multi-line patterns + - Shell glob patterns are supported. See Go's "path/filepath".Match + - If a pattern begins with a leading !, the match will be negated. + - If a pattern begins with a leading /, only paths relatively rooted will match. + - If the pattern ends with a trailing /, only directories will match + - If a pattern contains no slashes, file basenames are tested (not paths) + - The pattern sequence "**", while legal in a glob, will cause an error here + (to indicate incompatibility with .gitignore). Example: @@ -58,10 +59,10 @@ Example: a[b-d].txt Notable differences from .gitignore: - - The '**' syntax is not supported. - - The globbing library is Go's 'filepath.Match', not fnmatch(3) - - Trailing spaces are always ignored (there is no supported escape sequence) - - The evaluation of escape sequences has not been tested for compatibility - - There is no support for '\!' 
as a special leading sequence. + - The '**' syntax is not supported. + - The globbing library is Go's 'filepath.Match', not fnmatch(3) + - Trailing spaces are always ignored (there is no supported escape sequence) + - The evaluation of escape sequences has not been tested for compatibility + - There is no support for '\!' as a special leading sequence. */ package ignore diff --git a/internal/helm/common/string_resource.go b/internal/helm/common/string_resource.go new file mode 100644 index 000000000..b4cdada9f --- /dev/null +++ b/internal/helm/common/string_resource.go @@ -0,0 +1,39 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import "strings" + +// StringResource is there to satisfy the github.com/google/go-containerregistry/pkg/authn.Resource interface. +// It merely wraps a given string and returns it for all of the interface's methods. +type StringResource struct { + Registry string +} + +// String returns a string representation of the StringResource. +// It converts the StringResource object to a string. +// The returned string contains the value of the StringResource. +func (r StringResource) String() string { + return r.Registry +} + +// RegistryStr returns the string representation of the registry resource. +// It converts the StringResource object to a string that represents the registry resource. +// The returned string can be used to interact with the registry resource. 
+func (r StringResource) RegistryStr() string { + return strings.Split(r.Registry, "/")[0] +} diff --git a/internal/helm/getter/client_opts.go b/internal/helm/getter/client_opts.go new file mode 100644 index 000000000..e40811b39 --- /dev/null +++ b/internal/helm/getter/client_opts.go @@ -0,0 +1,296 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "os" + "path" + + "github.com/google/go-containerregistry/pkg/authn" + helmgetter "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/runtime/secrets" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/helm/registry" + soci "github.com/fluxcd/source-controller/internal/oci" +) + +const ( + certFileName = "cert.pem" + keyFileName = "key.pem" + caFileName = "ca.pem" +) + +var ErrDeprecatedTLSConfig = errors.New("TLS configured in a deprecated manner") + +// ClientOpts contains the various options to use while constructing +// a Helm repository client. 
+type ClientOpts struct { + Authenticator authn.Authenticator + Keychain authn.Keychain + RegLoginOpts []helmreg.LoginOption + TlsConfig *tls.Config + GetterOpts []helmgetter.Option + Insecure bool +} + +// MustLoginToRegistry returns true if the client options contain at least +// one registry login option. +func (o ClientOpts) MustLoginToRegistry() bool { + return len(o.RegLoginOpts) > 0 && o.RegLoginOpts[0] != nil +} + +// GetClientOpts uses the provided HelmRepository object and a normalized +// URL to construct a HelmClientOpts object. If obj is an OCI HelmRepository, +// then the returned options object will also contain the required registry +// auth mechanisms. +// A temporary directory is created to store the certs files if needed and its path is returned along with the options object. It is the +// caller's responsibility to clean up the directory. +func GetClientOpts(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, url string) (*ClientOpts, string, error) { + // This function configures authentication for Helm repositories based on the provided secrets: + // - CertSecretRef: TLS client certificates (always takes priority) + // - SecretRef: Can contain Basic Auth or TLS certificates (deprecated) + // For OCI repositories, additional registry-specific authentication is configured (including Docker config) + opts := &ClientOpts{ + GetterOpts: []helmgetter.Option{ + helmgetter.WithURL(url), + helmgetter.WithTimeout(obj.GetTimeout()), + helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), + }, + Insecure: obj.Spec.Insecure, + } + + // Process secrets and configure authentication + deprecatedTLS, certSecret, authSecret, err := configureAuthentication(ctx, c, obj, opts, url) + if err != nil { + return nil, "", err + } + + // Setup OCI registry specific configurations if needed + var tempCertDir string + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { + tempCertDir, err = configureOCIRegistryWithSecrets(ctx, obj, opts, url, 
certSecret, authSecret) + if err != nil { + return nil, "", err + } + } + + var deprecatedErr error + if deprecatedTLS { + deprecatedErr = ErrDeprecatedTLSConfig + } + + return opts, tempCertDir, deprecatedErr +} + +// configureAuthentication processes all secret references and sets up authentication. +// Returns (deprecatedTLS, certSecret, authSecret, error) where: +// - deprecatedTLS: true if TLS config comes from SecretRef (deprecated pattern) +// - certSecret: the secret from CertSecretRef (nil if not specified) +// - authSecret: the secret from SecretRef (nil if not specified) +func configureAuthentication(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, opts *ClientOpts, url string) (bool, *corev1.Secret, *corev1.Secret, error) { + var deprecatedTLS bool + var certSecret, authSecret *corev1.Secret + + if obj.Spec.CertSecretRef != nil { + secret, err := fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) + if err != nil { + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.CertSecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get TLS authentication secret '%s': %w", secretRef, err) + } + certSecret = secret + + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + tlsConfig, err := secrets.TLSConfigFromSecret(ctx, secret, tlsOpts...) + if err != nil { + return false, nil, nil, fmt.Errorf("failed to construct Helm client's TLS config: %w", err) + } + opts.TlsConfig = tlsConfig + } + + // Extract all authentication methods from SecretRef. + // This secret may contain multiple auth types (Basic Auth, TLS). 
+ if obj.Spec.SecretRef != nil { + secret, err := fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) + if err != nil { + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get authentication secret '%s': %w", secretRef, err) + } + authSecret = secret + + // NOTE: Use WithTLSSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository auth methods work with both system and user-provided CA certificates. + var authOpts = []secrets.AuthMethodsOption{ + secrets.WithTLSSystemCertPool(), + } + methods, err := secrets.AuthMethodsFromSecret(ctx, secret, authOpts...) + if err != nil { + return false, nil, nil, fmt.Errorf("failed to detect authentication methods: %w", err) + } + + if methods.HasBasicAuth() { + opts.GetterOpts = append(opts.GetterOpts, + helmgetter.WithBasicAuth(methods.Basic.Username, methods.Basic.Password)) + } + + // Use TLS from SecretRef only if CertSecretRef is not specified (CertSecretRef takes priority) + if opts.TlsConfig == nil && methods.HasTLS() { + opts.TlsConfig = methods.TLS + deprecatedTLS = true + } + } + + return deprecatedTLS, certSecret, authSecret, nil +} + +// configureOCIRegistryWithSecrets sets up OCI-specific configurations using pre-fetched secrets +func configureOCIRegistryWithSecrets(ctx context.Context, obj *sourcev1.HelmRepository, opts *ClientOpts, url string, certSecret, authSecret *corev1.Secret) (string, error) { + // Configure OCI authentication from authSecret if available + if authSecret != nil { + keychain, err := registry.LoginOptionFromSecret(url, *authSecret) + if err != nil { + return "", fmt.Errorf("failed to configure login options: %w", err) + } + opts.Keychain = keychain + } + + // Handle OCI provider authentication if no SecretRef + if obj.Spec.SecretRef == nil && 
obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider { + authenticator, err := soci.OIDCAuth(ctx, url, obj.Spec.Provider) + if err != nil { + return "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, err) + } + opts.Authenticator = authenticator + } + + // Setup registry login options + loginOpt, err := registry.NewLoginOption(opts.Authenticator, opts.Keychain, url) + if err != nil { + return "", err + } + + if loginOpt != nil { + opts.RegLoginOpts = []helmreg.LoginOption{loginOpt, helmreg.LoginOptInsecure(obj.Spec.Insecure)} + } + + // Handle TLS certificate files for OCI + var tempCertDir string + if opts.TlsConfig != nil { + tempCertDir, err = os.MkdirTemp("", "helm-repo-oci-certs") + if err != nil { + return "", fmt.Errorf("cannot create temporary directory: %w", err) + } + + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret + } + + certFile, keyFile, caFile, err := storeTLSCertificateFilesForOCI(ctx, tlsSecret, nil, tempCertDir) + if err != nil { + return "", fmt.Errorf("cannot write certs files to path: %w", err) + } + + tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) + if tlsLoginOpt != nil { + opts.RegLoginOpts = append(opts.RegLoginOpts, tlsLoginOpt) + } + } + + return tempCertDir, nil +} + +func fetchSecret(ctx context.Context, c client.Client, name, namespace string) (*corev1.Secret, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + var secret corev1.Secret + if err := c.Get(ctx, key, &secret); err != nil { + return nil, err + } + return &secret, nil +} + +// storeTLSCertificateFilesForOCI writes TLS certificate data from secrets to files for OCI registry authentication. +// Helm OCI registry client requires certificate file paths rather than in-memory data, +// so we need to temporarily write the certificate data to disk. 
+// Returns paths to the written cert, key, and CA files (any of which may be empty if not present). +func storeTLSCertificateFilesForOCI(ctx context.Context, certSecret, authSecret *corev1.Secret, path string) (string, string, string, error) { + var ( + certFile string + keyFile string + caFile string + err error + ) + + // Try to get TLS data from certSecret first, then authSecret + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret + } + + if tlsSecret != nil { + if certData, exists := tlsSecret.Data[secrets.KeyTLSCert]; exists { + if keyData, keyExists := tlsSecret.Data[secrets.KeyTLSPrivateKey]; keyExists { + certFile, err = writeToFile(certData, certFileName, path) + if err != nil { + return "", "", "", err + } + keyFile, err = writeToFile(keyData, keyFileName, path) + if err != nil { + return "", "", "", err + } + } + } + + if caData, exists := tlsSecret.Data[secrets.KeyCACert]; exists { + caFile, err = writeToFile(caData, caFileName, path) + if err != nil { + return "", "", "", err + } + } + } + + return certFile, keyFile, caFile, nil +} + +func writeToFile(data []byte, filename, tmpDir string) (string, error) { + file := path.Join(tmpDir, filename) + err := os.WriteFile(file, data, 0o600) + if err != nil { + return "", err + } + return file, nil +} diff --git a/internal/helm/getter/client_opts_test.go b/internal/helm/getter/client_opts_test.go new file mode 100644 index 000000000..bf40e7f86 --- /dev/null +++ b/internal/helm/getter/client_opts_test.go @@ -0,0 +1,300 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "context" + "os" + "strings" + "testing" + "time" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/google/go-containerregistry/pkg/name" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + helmv1 "github.com/fluxcd/source-controller/api/v1" +) + +func TestGetClientOpts(t *testing.T) { + tlsCA, err := os.ReadFile("../../controller/testdata/certs/ca.pem") + if err != nil { + t.Errorf("could not read CA file: %s", err) + } + + tests := []struct { + name string + certSecret *corev1.Secret + authSecret *corev1.Secret + afterFunc func(t *WithT, hcOpts *ClientOpts) + oci bool + insecure bool + err error + }{ + { + name: "HelmRepository with certSecretRef discards TLS config in secretRef", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.TlsConfig).ToNot(BeNil()) + t.Expect(len(hcOpts.GetterOpts)).To(Equal(4)) + }, + }, + { + name: "HelmRepository with TLS config only in secretRef is marked as deprecated", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-tls", + }, + Data: map[string][]byte{ + 
"username": []byte("user"), + "password": []byte("pass"), + "caFile": tlsCA, + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.TlsConfig).ToNot(BeNil()) + t.Expect(len(hcOpts.GetterOpts)).To(Equal(4)) + }, + err: ErrDeprecatedTLSConfig, + }, + { + name: "OCI HelmRepository with secretRef has auth configured", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + repo, err := name.NewRepository("ghcr.io/dummy") + t.Expect(err).ToNot(HaveOccurred()) + authenticator, err := hcOpts.Keychain.Resolve(repo) + t.Expect(err).ToNot(HaveOccurred()) + config, err := authenticator.Authorization() + t.Expect(err).ToNot(HaveOccurred()) + t.Expect(config.Username).To(Equal("user")) + t.Expect(config.Password).To(Equal("pass")) + t.Expect(hcOpts.Insecure).To(BeFalse()) + }, + oci: true, + }, + { + name: "OCI HelmRepository with insecure repository", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.Insecure).To(BeTrue()) + }, + oci: true, + insecure: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + helmRepo := &helmv1.HelmRepository{ + Spec: helmv1.HelmRepositorySpec{ + Timeout: &metav1.Duration{ + Duration: time.Second, + }, + Insecure: tt.insecure, + }, + } + if tt.oci { + helmRepo.Spec.Type = helmv1.HelmRepositoryTypeOCI + } + + clientBuilder := fakeclient.NewClientBuilder() + if tt.authSecret != nil { + clientBuilder.WithObjects(tt.authSecret.DeepCopy()) + helmRepo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.authSecret.Name, + } + } + if tt.certSecret != nil { + 
clientBuilder.WithObjects(tt.certSecret.DeepCopy()) + helmRepo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + } + c := clientBuilder.Build() + + clientOpts, _, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.err != nil { + g.Expect(err).To(Equal(tt.err)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + tt.afterFunc(g, clientOpts) + }) + } +} + +func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { + tlsCA, err := os.ReadFile("../../controller/testdata/certs/ca.pem") + if err != nil { + t.Errorf("could not read CA file: %s", err) + } + + tests := []struct { + name string + certSecret *corev1.Secret + authSecret *corev1.Secret + loginOptsN int + wantErrMsg string + }{ + { + name: "with valid caFile", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + loginOptsN: 3, + }, + { + name: "without caFile", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{}, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + wantErrMsg: "must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'", + }, + { + name: "without cert secret", + certSecret: nil, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + loginOptsN: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + helmRepo := &helmv1.HelmRepository{ + Spec: helmv1.HelmRepositorySpec{ + Timeout: &metav1.Duration{ + 
Duration: time.Second, + }, + Type: helmv1.HelmRepositoryTypeOCI, + }, + } + + clientBuilder := fakeclient.NewClientBuilder() + + if tt.authSecret != nil { + clientBuilder.WithObjects(tt.authSecret.DeepCopy()) + helmRepo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.authSecret.Name, + } + } + + if tt.certSecret != nil { + clientBuilder.WithObjects(tt.certSecret.DeepCopy()) + helmRepo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + } + c := clientBuilder.Build() + + clientOpts, tmpDir, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.wantErrMsg != "" { + if err == nil { + t.Errorf("GetClientOpts() expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrMsg) { + t.Errorf("GetClientOpts() expected error containing %q but got %v", tt.wantErrMsg, err) + return + } + return + } + if err != nil { + t.Errorf("GetClientOpts() error = %v", err) + return + } + if tmpDir != "" { + defer os.RemoveAll(tmpDir) + } + if tt.loginOptsN != len(clientOpts.RegLoginOpts) { + // we should have a login option but no TLS option + t.Errorf("expected length of %d for clientOpts.RegLoginOpts but got %d", tt.loginOptsN, len(clientOpts.RegLoginOpts)) + return + } + }) + } +} diff --git a/internal/helm/getter/getter.go b/internal/helm/getter/getter.go deleted file mode 100644 index 0b45f0101..000000000 --- a/internal/helm/getter/getter.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "net/url" - - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" -) - -// ClientOptionsFromSecret constructs a getter.Option slice for the given secret. -// It returns the slice, or an error. -func ClientOptionsFromSecret(secret corev1.Secret) ([]getter.Option, error) { - var opts []getter.Option - basicAuth, err := BasicAuthFromSecret(secret) - if err != nil { - return opts, err - } - if basicAuth != nil { - opts = append(opts, basicAuth) - } - return opts, nil -} - -// BasicAuthFromSecret attempts to construct a basic auth getter.Option for the -// given v1.Secret and returns the result. -// -// Secrets with no username AND password are ignored, if only one is defined it -// returns an error. -func BasicAuthFromSecret(secret corev1.Secret) (getter.Option, error) { - username, password := string(secret.Data["username"]), string(secret.Data["password"]) - switch { - case username == "" && password == "": - return nil, nil - case username == "" || password == "": - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - return getter.WithBasicAuth(username, password), nil -} - -// TLSClientConfigFromSecret attempts to construct a TLS client config -// for the given v1.Secret. It returns the TLS client config or an error. -// -// Secrets with no certFile, keyFile, AND caFile are ignored, if only a -// certBytes OR keyBytes is defined it returns an error. 
-func TLSClientConfigFromSecret(secret corev1.Secret, repositoryUrl string) (*tls.Config, error) { - certBytes, keyBytes, caBytes := secret.Data["certFile"], secret.Data["keyFile"], secret.Data["caFile"] - switch { - case len(certBytes)+len(keyBytes)+len(caBytes) == 0: - return nil, nil - case (len(certBytes) > 0 && len(keyBytes) == 0) || (len(keyBytes) > 0 && len(certBytes) == 0): - return nil, fmt.Errorf("invalid '%s' secret data: fields 'certFile' and 'keyFile' require each other's presence", - secret.Name) - } - - tlsConf := &tls.Config{} - if len(certBytes) > 0 && len(keyBytes) > 0 { - cert, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return nil, err - } - tlsConf.Certificates = append(tlsConf.Certificates, cert) - } - - if len(caBytes) > 0 { - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(caBytes) { - return nil, fmt.Errorf("cannot append certificate into certificate pool: invalid caFile") - } - - tlsConf.RootCAs = cp - } - - tlsConf.BuildNameToCertificate() - - u, err := url.Parse(repositoryUrl) - if err != nil { - return nil, fmt.Errorf("cannot parse repository URL: %w", err) - } - - tlsConf.ServerName = u.Hostname() - - return tlsConf, nil -} diff --git a/internal/helm/getter/getter_test.go b/internal/helm/getter/getter_test.go deleted file mode 100644 index a13c029e3..000000000 --- a/internal/helm/getter/getter_test.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package getter - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "math/big" - "testing" - - corev1 "k8s.io/api/core/v1" -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("user"), - "password": []byte("password"), - }, - } -) - -func TestClientOptionsFromSecret(t *testing.T) { - tests := []struct { - name string - secrets []corev1.Secret - }{ - {"basic auth", []corev1.Secret{basicAuthSecretFixture}}, - {"empty", []corev1.Secret{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := corev1.Secret{Data: map[string][]byte{}} - for _, s := range tt.secrets { - for k, v := range s.Data { - secret.Data[k] = v - } - } - - got, err := ClientOptionsFromSecret(secret) - if err != nil { - t.Errorf("ClientOptionsFromSecret() error = %v", err) - return - } - if len(got) != len(tt.secrets) { - t.Errorf("ClientOptionsFromSecret() options = %v, expected = %v", got, len(tt.secrets)) - } - }) - } -} - -func TestBasicAuthFromSecret(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"username and password", basicAuthSecretFixture, nil, false, false}, - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, true, true}, - {"without password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, true, true}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - got, err := BasicAuthFromSecret(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("BasicAuthFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("BasicAuthFromSecret() != nil") - return - } - }) - } -} - -func TestTLSClientConfigFromSecret(t 
*testing.T) { - tlsSecretFixture := validTlsSecret(t) - - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"certFile, keyFile and caFile", tlsSecretFixture, nil, false, false}, - {"without certFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "certFile") }, true, true}, - {"without keyFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "keyFile") }, true, true}, - {"without caFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "caFile") }, false, false}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - - got, err := TLSClientConfigFromSecret(*secret, "") - if (err != nil) != tt.wantErr { - t.Errorf("TLSClientConfigFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("TLSClientConfigFromSecret() != nil") - return - } - }) - } -} - -// validTlsSecret creates a secret containing key pair and CA certificate that are -// valid from a syntax (minimum requirements) perspective. 
-func validTlsSecret(t *testing.T) corev1.Secret { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal("Private key cannot be created.", err.Error()) - } - - certTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1337), - } - cert, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &key.PublicKey, key) - if err != nil { - t.Fatal("Certificate cannot be created.", err.Error()) - } - - ca := &x509.Certificate{ - SerialNumber: big.NewInt(7331), - IsCA: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - } - - caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - t.Fatal("CA private key cannot be created.", err.Error()) - } - - caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) - if err != nil { - t.Fatal("CA certificate cannot be created.", err.Error()) - } - - keyPem := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), - }) - - certPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert, - }) - - caPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - }) - - return corev1.Secret{ - Data: map[string][]byte{ - "certFile": []byte(certPem), - "keyFile": []byte(keyPem), - "caFile": []byte(caPem), - }, - } -} diff --git a/internal/helm/registry/auth.go b/internal/helm/registry/auth.go index 75667f1d5..c8b3ca6ae 100644 --- a/internal/helm/registry/auth.go +++ b/internal/helm/registry/auth.go @@ -23,26 +23,43 @@ import ( "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/credentials" + "github.com/fluxcd/source-controller/internal/helm/common" + "github.com/fluxcd/source-controller/internal/oci" + "github.com/google/go-containerregistry/pkg/authn" 
"helm.sh/helm/v3/pkg/registry" corev1 "k8s.io/api/core/v1" ) +// helper is a subset of the Docker credential helper credentials.Helper interface used by NewKeychainFromHelper. +type helper struct { + registry string + username, password string + err error +} + +func (h helper) Get(serverURL string) (string, string, error) { + if serverURL != h.registry { + return "", "", fmt.Errorf("unexpected serverURL: %s", serverURL) + } + return h.username, h.password, h.err +} + // LoginOptionFromSecret derives authentication data from a Secret to login to an OCI registry. This Secret // may either hold "username" and "password" fields or be of the corev1.SecretTypeDockerConfigJson type and hold // a corev1.DockerConfigJsonKey field with a complete Docker configuration. If both, "username" and "password" are // empty, a nil LoginOption and a nil error will be returned. -func LoginOptionFromSecret(registryURL string, secret corev1.Secret) (registry.LoginOption, error) { +func LoginOptionFromSecret(registryURL string, secret corev1.Secret) (authn.Keychain, error) { var username, password string + parsedURL, err := url.Parse(registryURL) + if err != nil { + return nil, fmt.Errorf("unable to parse registry URL '%s' while reconciling Secret '%s': %w", + registryURL, secret.Name, err) + } if secret.Type == corev1.SecretTypeDockerConfigJson { dockerCfg, err := config.LoadFromReader(bytes.NewReader(secret.Data[corev1.DockerConfigJsonKey])) if err != nil { return nil, fmt.Errorf("unable to load Docker config from Secret '%s': %w", secret.Name, err) } - parsedURL, err := url.Parse(registryURL) - if err != nil { - return nil, fmt.Errorf("unable to parse registry URL '%s' while reconciling Secret '%s': %w", - registryURL, secret.Name, err) - } authConfig, err := dockerCfg.GetAuthConfig(parsedURL.Host) if err != nil { return nil, fmt.Errorf("unable to get authentication data from Secret '%s': %w", secret.Name, err) @@ -62,9 +79,74 @@ func LoginOptionFromSecret(registryURL string, secret 
corev1.Secret) (registry.L } switch { case username == "" && password == "": - return nil, nil + return oci.Anonymous{}, nil case username == "" || password == "": return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) } + return authn.NewKeychainFromHelper(helper{registry: parsedURL.Host, username: username, password: password}), nil +} + +// KeychainAdaptHelper returns an ORAS credentials callback configured with the authorization data +// from the given authn keychain. This allows for example to make use of credential helpers from +// cloud providers. +// Ref: https://github.com/google/go-containerregistry/tree/main/pkg/authn +func KeychainAdaptHelper(keyChain authn.Keychain) func(string) (registry.LoginOption, error) { + return func(registryURL string) (registry.LoginOption, error) { + parsedURL, err := url.Parse(registryURL) + if err != nil { + return nil, fmt.Errorf("unable to parse registry URL '%s'", registryURL) + } + authenticator, err := keyChain.Resolve(common.StringResource{Registry: parsedURL.Host}) + if err != nil { + return nil, fmt.Errorf("unable to resolve credentials for registry '%s': %w", registryURL, err) + } + + return AuthAdaptHelper(authenticator) + } +} + +// AuthAdaptHelper returns an ORAS credentials callback configured with the authorization data +// from the given authn authenticator. This allows for example to make use of credential helpers from +// cloud providers. 
+// Ref: https://github.com/google/go-containerregistry/tree/main/pkg/authn +func AuthAdaptHelper(auth authn.Authenticator) (registry.LoginOption, error) { + authConfig, err := auth.Authorization() + if err != nil { + return nil, fmt.Errorf("unable to get authentication data from OIDC: %w", err) + } + + username := authConfig.Username + password := authConfig.Password + + switch { + case username == "" && password == "": + return nil, nil + case username == "" || password == "": + return nil, fmt.Errorf("invalid auth data: required fields 'username' and 'password'") + } return registry.LoginOptBasicAuth(username, password), nil } + +// NewLoginOption returns a registry login option derived from the given authenticator or keychain, with the authenticator taking precedence. +// If both the authenticator and the keychain are nil, a nil login option is returned. +func NewLoginOption(auth authn.Authenticator, keychain authn.Keychain, registryURL string) (registry.LoginOption, error) { + if auth != nil { + return AuthAdaptHelper(auth) + } + + if keychain != nil { + return KeychainAdaptHelper(keychain)(registryURL) + } + + return nil, nil +} + +// TLSLoginOption returns a LoginOption that can be used to configure the TLS client. +// It requires either the caFile or both certFile and keyFile to be not blank. +func TLSLoginOption(certFile, keyFile, caFile string) registry.LoginOption { + if (certFile != "" && keyFile != "") || caFile != "" { + return registry.LoginOptTLSClientConfig(certFile, keyFile, caFile) + } + + return nil +} diff --git a/internal/helm/registry/auth_test.go b/internal/helm/registry/auth_test.go index 921ecbf14..14942a5bb 100644 --- a/internal/helm/registry/auth_test.go +++ b/internal/helm/registry/auth_test.go @@ -17,12 +17,16 @@ limitations under the License. package registry import ( + "net/url" "testing" + "github.com/google/go-containerregistry/pkg/authn" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" ) +const repoURL = "https://example.com" + func TestLoginOptionFromSecret(t *testing.T) { testURL := "oci://registry.example.com/foo/bar" testUser := "flux" @@ -129,3 +133,61 @@ func TestLoginOptionFromSecret(t *testing.T) { }) } } + +func TestKeychainAdaptHelper(t *testing.T) { + g := NewWithT(t) + reg, err := url.Parse(repoURL) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + auth := helper{ + username: "flux", + password: "flux_password", + registry: reg.Host, + } + + tests := []struct { + name string + auth authn.Keychain + expectedLogin bool + wantErr bool + }{ + { + name: "Login from basic auth with empty auth", + auth: authn.NewKeychainFromHelper(helper{}), + expectedLogin: false, + wantErr: false, + }, + { + name: "Login from basic auth", + auth: authn.NewKeychainFromHelper(auth), + expectedLogin: true, + wantErr: false, + }, + { + name: "Login with missing password", + auth: authn.NewKeychainFromHelper(helper{username: "flux", registry: reg.Host}), + expectedLogin: false, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + loginOpt, err := KeychainAdaptHelper(tt.auth)(repoURL) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + + if tt.expectedLogin { + g.Expect(loginOpt).ToNot(BeNil()) + } else { + g.Expect(loginOpt).To(BeNil()) + } + }) + } +} diff --git a/internal/helm/registry/client.go b/internal/helm/registry/client.go index 1247347ab..5b89ea12e 100644 --- a/internal/helm/registry/client.go +++ b/internal/helm/registry/client.go @@ -17,7 +17,9 @@ limitations under the License. package registry import ( + "crypto/tls" "io" + "net/http" "os" "helm.sh/helm/v3/pkg/registry" @@ -27,7 +29,7 @@ import ( // ClientGenerator generates a registry client and a temporary credential file. // The client is meant to be used for a single reconciliation. 
// The file is meant to be used for a single reconciliation and deleted after. -func ClientGenerator(isLogin bool) (*registry.Client, string, error) { +func ClientGenerator(tlsConfig *tls.Config, isLogin, insecureHTTP bool) (*registry.Client, string, error) { if isLogin { // create a temporary file to store the credentials // this is needed because otherwise the credentials are stored in ~/.docker/config.json. @@ -37,7 +39,7 @@ func ClientGenerator(isLogin bool) (*registry.Client, string, error) { } var errs []error - rClient, err := registry.NewClient(registry.ClientOptWriter(io.Discard), registry.ClientOptCredentialsFile(credentialsFile.Name())) + rClient, err := newClient(credentialsFile.Name(), tlsConfig, insecureHTTP) if err != nil { errs = append(errs, err) // attempt to delete the temporary file @@ -52,9 +54,30 @@ func ClientGenerator(isLogin bool) (*registry.Client, string, error) { return rClient, credentialsFile.Name(), nil } - rClient, err := registry.NewClient(registry.ClientOptWriter(io.Discard)) + rClient, err := newClient("", tlsConfig, insecureHTTP) if err != nil { return nil, "", err } return rClient, "", nil } + +func newClient(credentialsFile string, tlsConfig *tls.Config, insecureHTTP bool) (*registry.Client, error) { + opts := []registry.ClientOption{ + registry.ClientOptWriter(io.Discard), + } + if insecureHTTP { + opts = append(opts, registry.ClientOptPlainHTTP()) + } + if tlsConfig != nil { + t := http.DefaultTransport.(*http.Transport).Clone() + t.TLSClientConfig = tlsConfig + opts = append(opts, registry.ClientOptHTTPClient(&http.Client{ + Transport: t, + })) + } + if credentialsFile != "" { + opts = append(opts, registry.ClientOptCredentialsFile(credentialsFile)) + } + + return registry.NewClient(opts...) 
+} diff --git a/internal/helm/repository/chart_repository.go b/internal/helm/repository/chart_repository.go index 282d49a5d..e8030ec7b 100644 --- a/internal/helm/repository/chart_repository.go +++ b/internal/helm/repository/chart_repository.go @@ -18,9 +18,9 @@ package repository import ( "bytes" - "crypto/sha256" + "context" "crypto/tls" - "encoding/hex" + "encoding/json" "errors" "fmt" "io" @@ -30,22 +30,86 @@ import ( "sort" "strings" "sync" - "time" "github.com/Masterminds/semver/v3" + "github.com/opencontainers/go-digest" + "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/getter" "helm.sh/helm/v3/pkg/repo" - kerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/yaml" "github.com/fluxcd/pkg/version" - "github.com/fluxcd/source-controller/internal/cache" + "github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/source-controller/internal/helm" - "github.com/fluxcd/source-controller/internal/transport" + "github.com/fluxcd/source-controller/internal/oci" ) -var ErrNoChartIndex = errors.New("no chart index") +var ( + ErrNoChartIndex = errors.New("no chart index") +) + +// IndexFromFile loads a repo.IndexFile from the given path. It returns an +// error if the file does not exist, is not a regular file, exceeds the +// maximum index file size, or if the file cannot be parsed. +func IndexFromFile(path string) (*repo.IndexFile, error) { + st, err := os.Lstat(path) + if err != nil { + return nil, err + } + if !st.Mode().IsRegular() { + return nil, fmt.Errorf("%s is not a regular file", path) + } + if st.Size() > helm.MaxIndexSize { + return nil, fmt.Errorf("%s exceeds the maximum index file size of %d bytes", path, helm.MaxIndexSize) + } + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return IndexFromBytes(b) +} + +// IndexFromBytes loads a repo.IndexFile from the given bytes. 
It returns an +// error if the bytes cannot be parsed, or if the API version is not set. +// The entries are sorted before the index is returned. +func IndexFromBytes(b []byte) (*repo.IndexFile, error) { + if len(b) == 0 { + return nil, repo.ErrEmptyIndexYaml + } + + i := &repo.IndexFile{} + if err := jsonOrYamlUnmarshal(b, i); err != nil { + return nil, err + } + + if i.APIVersion == "" { + return nil, repo.ErrNoAPIVersion + } + + for name, cvs := range i.Entries { + for idx := len(cvs) - 1; idx >= 0; idx-- { + if cvs[idx] == nil { + continue + } + // When metadata section missing, initialize with no data + if cvs[idx].Metadata == nil { + cvs[idx].Metadata = &chart.Metadata{} + } + if cvs[idx].APIVersion == "" { + cvs[idx].APIVersion = chart.APIVersionV1 + } + if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil { + cvs = append(cvs[:idx], cvs[idx+1:]...) + } + } + // adjust slice to only contain a set of valid versions + i.Entries[name] = cvs + } + + i.SortEntries() + return i, nil +} // ChartRepository represents a Helm chart repository, and the configuration // required to download the chart index and charts from the repository. @@ -54,73 +118,31 @@ type ChartRepository struct { // URL the ChartRepository's index.yaml can be found at, // without the index.yaml suffix. URL string + // Path is the absolute path to the Index file. + Path string + // Index of the ChartRepository. + Index *repo.IndexFile + // Client to use while downloading the Index or a chart from the URL. Client getter.Getter // Options to configure the Client with while downloading the Index // or a chart from the URL. Options []getter.Option - // CachePath is the path of a cached index.yaml for read-only operations. - CachePath string - // Cached indicates if the ChartRepository index.yaml has been cached - // to CachePath. - Cached bool - // Index contains a loaded chart repository index if not nil. 
- Index *repo.IndexFile - // Checksum contains the SHA256 checksum of the loaded chart repository - // index bytes. This is different from the checksum of the CachePath, which - // may contain unordered entries. - Checksum string tlsConfig *tls.Config - *sync.RWMutex - - cacheInfo -} - -type cacheInfo struct { - // In memory cache of the index.yaml file. - IndexCache *cache.Cache - // IndexKey is the cache key for the index.yaml file. - IndexKey string - // IndexTTL is the cache TTL for the index.yaml file. - IndexTTL time.Duration - // RecordIndexCacheMetric records the cache hit/miss metrics for the index.yaml file. - RecordIndexCacheMetric RecordMetricsFunc -} + cached bool + digests map[digest.Algorithm]digest.Digest -// ChartRepositoryOption is a function that can be passed to NewChartRepository -// to configure a ChartRepository. -type ChartRepositoryOption func(*ChartRepository) error - -// RecordMetricsFunc is a function that records metrics. -type RecordMetricsFunc func(event string) - -// WithMemoryCache returns a ChartRepositoryOptions that will enable the -// ChartRepository to cache the index.yaml file in memory. -// The cache key have to be safe in multi-tenancy environments, -// as otherwise it could be used as a vector to bypass the helm repository's authentication. -func WithMemoryCache(key string, c *cache.Cache, ttl time.Duration, rec RecordMetricsFunc) ChartRepositoryOption { - return func(r *ChartRepository) error { - if c != nil { - if key == "" { - return errors.New("cache key cannot be empty") - } - } - r.IndexCache = c - r.IndexKey = key - r.IndexTTL = ttl - r.RecordIndexCacheMetric = rec - return nil - } + *sync.RWMutex } // NewChartRepository constructs and returns a new ChartRepository with // the ChartRepository.Client configured to the getter.Getter for the // repository URL scheme. It returns an error on URL parsing failures, // or if there is no getter available for the scheme. 
-func NewChartRepository(repositoryURL, cachePath string, providers getter.Providers, tlsConfig *tls.Config, getterOpts []getter.Option, chartRepoOpts ...ChartRepositoryOption) (*ChartRepository, error) { - u, err := url.Parse(repositoryURL) +func NewChartRepository(URL, path string, providers getter.Providers, tlsConfig *tls.Config, getterOpts ...getter.Option) (*ChartRepository, error) { + u, err := url.Parse(URL) if err != nil { return nil, err } @@ -130,23 +152,18 @@ func NewChartRepository(repositoryURL, cachePath string, providers getter.Provid } r := newChartRepository() - r.URL = repositoryURL - r.CachePath = cachePath + r.URL = URL + r.Path = path r.Client = c r.Options = getterOpts r.tlsConfig = tlsConfig - for _, opt := range chartRepoOpts { - if err := opt(r); err != nil { - return nil, err - } - } - return r, nil } func newChartRepository() *ChartRepository { return &ChartRepository{ + digests: make(map[digest.Algorithm]digest.Digest, 0), RWMutex: &sync.RWMutex{}, } } @@ -157,9 +174,17 @@ func newChartRepository() *ChartRepository { func (r *ChartRepository) GetChartVersion(name, ver string) (*repo.ChartVersion, error) { // See if we already have the index in cache or try to load it. 
if err := r.StrategicallyLoadIndex(); err != nil { - return nil, err + return nil, &ErrExternal{Err: err} } + cv, err := r.getChartVersion(name, ver) + if err != nil { + return nil, &ErrReference{Err: err} + } + return cv, nil +} + +func (r *ChartRepository) getChartVersion(name, ver string) (*repo.ChartVersion, error) { r.RLock() defer r.RUnlock() @@ -196,10 +221,10 @@ func (r *ChartRepository) GetChartVersion(name, ver string) (*repo.ChartVersion, } } - // Filter out chart versions that doesn't satisfy constraints if any, + // Filter out chart versions that don't satisfy constraints if any, // parse semver and build a lookup table var matchedVersions semver.Collection - lookup := make(map[*semver.Version]*repo.ChartVersion) + lookup := make(map[*semver.Version]*repo.ChartVersion, 0) for _, cv := range cvs { v, err := version.ParseVersion(cv.Version) if err != nil { @@ -252,182 +277,105 @@ func (r *ChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer // always the correct one to pick, check for updates once in awhile. // Ref: https://github.com/helm/helm/blob/v3.3.0/pkg/downloader/chart_downloader.go#L241 ref := chart.URLs[0] - u, err := url.Parse(ref) + resolvedUrl, err := repo.ResolveReferenceURL(r.URL, ref) if err != nil { - err = fmt.Errorf("invalid chart URL format '%s': %w", ref, err) return nil, err } - // Prepend the chart repository base URL if the URL is relative - if !u.IsAbs() { - repoURL, err := url.Parse(r.URL) - if err != nil { - err = fmt.Errorf("invalid chart repository URL format '%s': %w", r.URL, err) - return nil, err - } - q := repoURL.Query() - // Trailing slash is required for ResolveReference to work - repoURL.Path = strings.TrimSuffix(repoURL.Path, "/") + "/" - u = repoURL.ResolveReference(u) - u.RawQuery = q.Encode() - } - t := transport.NewOrIdle(r.tlsConfig) clientOpts := append(r.Options, getter.WithTransport(t)) defer transport.Release(t) - return r.Client.Get(u.String(), clientOpts...) 
-} - -// LoadIndexFromBytes loads Index from the given bytes. -// It returns a repo.ErrNoAPIVersion error if the API version is not set -func (r *ChartRepository) LoadIndexFromBytes(b []byte) error { - i := &repo.IndexFile{} - if err := yaml.UnmarshalStrict(b, i); err != nil { - return err - } - if i.APIVersion == "" { - return repo.ErrNoAPIVersion - } - i.SortEntries() - - r.Lock() - r.Index = i - r.Checksum = fmt.Sprintf("%x", sha256.Sum256(b)) - r.Unlock() - return nil -} - -// LoadFromFile reads the file at the given path and loads it into Index. -func (r *ChartRepository) LoadFromFile(path string) error { - stat, err := os.Stat(path) - if err != nil || stat.IsDir() { - if err == nil { - err = fmt.Errorf("'%s' is a directory", path) - } - return err - } - if stat.Size() > helm.MaxIndexSize { - return fmt.Errorf("size of index '%s' exceeds '%d' bytes limit", stat.Name(), helm.MaxIndexSize) - } - b, err := os.ReadFile(path) - if err != nil { - return err - } - return r.LoadIndexFromBytes(b) + return r.Client.Get(resolvedUrl, clientOpts...) } // CacheIndex attempts to write the index from the remote into a new temporary file -// using DownloadIndex, and sets CachePath and Cached. -// It returns the SHA256 checksum of the downloaded index bytes, or an error. -// The caller is expected to handle the garbage collection of CachePath, and to -// load the Index separately using LoadFromCache if required. -func (r *ChartRepository) CacheIndex() (string, error) { +// using DownloadIndex, and sets Path and cached. +// The caller is expected to handle the garbage collection of Path, and to +// load the Index separately using LoadFromPath if required. 
+func (r *ChartRepository) CacheIndex() error { f, err := os.CreateTemp("", "chart-index-*.yaml") if err != nil { - return "", fmt.Errorf("failed to create temp file to cache index to: %w", err) + return fmt.Errorf("failed to create temp file to cache index to: %w", err) } - h := sha256.New() - mw := io.MultiWriter(f, h) - if err = r.DownloadIndex(mw); err != nil { + if err = r.DownloadIndex(f, helm.MaxIndexSize); err != nil { f.Close() - os.RemoveAll(f.Name()) - return "", fmt.Errorf("failed to cache index to temporary file: %w", err) + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } + return fmt.Errorf("failed to cache index to temporary file: %w", err) } + if err = f.Close(); err != nil { - os.RemoveAll(f.Name()) - return "", fmt.Errorf("failed to close cached index file '%s': %w", f.Name(), err) + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } + return fmt.Errorf("failed to close cached index file '%s': %w", f.Name(), err) } r.Lock() - r.CachePath = f.Name() - r.Cached = true + r.Path = f.Name() + r.Index = nil + r.cached = true + r.invalidate() r.Unlock() - return hex.EncodeToString(h.Sum(nil)), nil -} - -// CacheIndexInMemory attempts to cache the index in memory. -// It returns an error if it fails. -// The cache key have to be safe in multi-tenancy environments, -// as otherwise it could be used as a vector to bypass the helm repository's authentication. -func (r *ChartRepository) CacheIndexInMemory() error { - // Cache the index if it was successfully retrieved - // and the chart was successfully built - if r.IndexCache != nil && r.Index != nil { - err := r.IndexCache.Set(r.IndexKey, r.Index, r.IndexTTL) - if err != nil { - return err - } - } return nil } -// StrategicallyLoadIndex lazy-loads the Index -// first from Indexcache, -// then from CachePath using oadFromCache if it does not HasIndex. 
-// If not HasCacheFile, a cache attempt is made using CacheIndex -// before continuing to load. +// StrategicallyLoadIndex lazy-loads the Index if required, first +// attempting to load it from Path if the file exists, before falling +// back to caching it. func (r *ChartRepository) StrategicallyLoadIndex() (err error) { if r.HasIndex() { return } - if r.IndexCache != nil { - if found := r.LoadFromMemCache(); found { + if !r.HasFile() { + if err = r.CacheIndex(); err != nil { + err = fmt.Errorf("failed to cache index: %w", err) return } } - if !r.HasCacheFile() { - if _, err = r.CacheIndex(); err != nil { - err = fmt.Errorf("failed to strategically load index: %w", err) - return - } - } - if err = r.LoadFromCache(); err != nil { - err = fmt.Errorf("failed to strategically load index: %w", err) + if err = r.LoadFromPath(); err != nil { + err = fmt.Errorf("failed to load index: %w", err) return } return } -// LoadFromMemCache attempts to load the Index from the provided cache. -// It returns true if the Index was found in the cache, and false otherwise. -func (r *ChartRepository) LoadFromMemCache() bool { - if index, found := r.IndexCache.Get(r.IndexKey); found { - r.Lock() - r.Index = index.(*repo.IndexFile) - r.Unlock() - - // record the cache hit - if r.RecordIndexCacheMetric != nil { - r.RecordIndexCacheMetric(cache.CacheEventTypeHit) - } - return true - } +// LoadFromPath attempts to load the Index from the configured Path. +// It returns an error if no Path is set, or if the load failed. +func (r *ChartRepository) LoadFromPath() error { + r.Lock() + defer r.Unlock() - // record the cache miss - if r.RecordIndexCacheMetric != nil { - r.RecordIndexCacheMetric(cache.CacheEventTypeMiss) + if len(r.Path) == 0 { + return fmt.Errorf("no cache path") } - return false -} -// LoadFromCache attempts to load the Index from the configured CachePath. -// It returns an error if no CachePath is set, or if the load failed. 
-func (r *ChartRepository) LoadFromCache() error { - if cachePath := r.CachePath; cachePath != "" { - return r.LoadFromFile(cachePath) + i, err := IndexFromFile(r.Path) + if err != nil { + return fmt.Errorf("failed to load index: %w", err) } - return fmt.Errorf("no cache path set") + + r.Index = i + return nil } // DownloadIndex attempts to download the chart repository index using // the Client and set Options, and writes the index to the given io.Writer. -// It returns an url.Error if the URL failed to parse. -func (r *ChartRepository) DownloadIndex(w io.Writer) (err error) { +// Upon download, the index is copied to the writer if the index size +// does not exceed the maximum index file size. Otherwise, it returns an error. +// A url.Error is returned if the URL failed to parse. +func (r *ChartRepository) DownloadIndex(w io.Writer, maxSize int64) (err error) { + r.RLock() + defer r.RUnlock() + u, err := url.Parse(r.URL) if err != nil { return err @@ -444,79 +392,141 @@ func (r *ChartRepository) DownloadIndex(w io.Writer) (err error) { if err != nil { return err } + + if int64(res.Len()) > maxSize { + return fmt.Errorf("index exceeds the maximum index file size of %d bytes", maxSize) + } + if _, err = io.Copy(w, res); err != nil { return err } return nil } +// Digest returns the digest of the file at the ChartRepository's Path. +func (r *ChartRepository) Digest(algorithm digest.Algorithm) digest.Digest { + if !r.HasFile() { + return "" + } + + r.Lock() + defer r.Unlock() + + if _, ok := r.digests[algorithm]; !ok { + if f, err := os.Open(r.Path); err == nil { + defer f.Close() + rd := io.LimitReader(f, helm.MaxIndexSize) + if d, err := algorithm.FromReader(rd); err == nil { + r.digests[algorithm] = d + } + } + } + return r.digests[algorithm] +} + +// ToJSON returns the index formatted as JSON. 
+func (r *ChartRepository) ToJSON() ([]byte, error) { + if !r.HasIndex() { + return nil, fmt.Errorf("index not loaded yet") + } + + return json.MarshalIndent(r.Index, "", " ") +} + // HasIndex returns true if the Index is not nil. func (r *ChartRepository) HasIndex() bool { r.RLock() defer r.RUnlock() + return r.Index != nil } -// HasCacheFile returns true if CachePath is not empty. -func (r *ChartRepository) HasCacheFile() bool { +// HasFile returns true if Path exists and is a regular file. +func (r *ChartRepository) HasFile() bool { r.RLock() defer r.RUnlock() - return r.CachePath != "" -} -// Unload can be used to signal the Go garbage collector the Index can -// be freed from memory if the ChartRepository object is expected to -// continue to exist in the stack for some time. -func (r *ChartRepository) Unload() { - if r == nil { - return + if r.Path != "" { + if stat, err := os.Lstat(r.Path); err == nil { + return stat.Mode().IsRegular() + } } + return false +} +// Clear clears the Index and removes the file at Path, if cached. +func (r *ChartRepository) Clear() error { r.Lock() defer r.Unlock() + r.Index = nil -} -// Clear caches the index in memory before unloading it. -// It cleans up temporary files and directories created by the repository. -func (r *ChartRepository) Clear() error { - var errs []error - if err := r.CacheIndexInMemory(); err != nil { - errs = append(errs, err) + if r.cached { + if err := os.Remove(r.Path); err != nil { + return fmt.Errorf("failed to remove cached index: %w", err) + } + r.Path = "" + r.cached = false } - r.Unload() + r.invalidate() + return nil +} - if err := r.RemoveCache(); err != nil { - errs = append(errs, err) - } +// Invalidate clears any cached digests. +func (r *ChartRepository) Invalidate() { + r.Lock() + defer r.Unlock() - return kerrors.NewAggregate(errs) + r.invalidate() } -// SetMemCache sets the cache to use for this repository. 
-func (r *ChartRepository) SetMemCache(key string, c *cache.Cache, ttl time.Duration, rec RecordMetricsFunc) { - r.IndexKey = key - r.IndexCache = c - r.IndexTTL = ttl - r.RecordIndexCacheMetric = rec +func (r *ChartRepository) invalidate() { + r.digests = make(map[digest.Algorithm]digest.Digest, 0) } -// RemoveCache removes the CachePath if Cached. -func (r *ChartRepository) RemoveCache() error { - if r == nil { - return nil - } +// VerifyChart verifies the chart against a signature. +// It returns an error on failure. +func (r *ChartRepository) VerifyChart(_ context.Context, _ *repo.ChartVersion) (oci.VerificationResult, error) { + // this is a no-op because this is not implemented yet. + return oci.VerificationResultIgnored, fmt.Errorf("not implemented") +} - r.Lock() - defer r.Unlock() +// jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML +// into the provided interface. +// +// It automatically detects whether the data is in JSON or YAML format by +// checking its validity as JSON. If the data is valid JSON, it will use the +// `encoding/json` package to unmarshal it. Otherwise, it will use the +// `sigs.k8s.io/yaml` package to unmarshal the YAML data. +// +// Can potentially be replaced when Helm PR for JSON support has been merged. +// xref: https://github.com/helm/helm/pull/12245 +func jsonOrYamlUnmarshal(b []byte, i interface{}) error { + if json.Valid(b) { + return json.Unmarshal(b, i) + } + return yaml.UnmarshalStrict(b, i) +} - if r.Cached { - if err := os.Remove(r.CachePath); err != nil && !os.IsNotExist(err) { - return err - } - r.CachePath = "" - r.Cached = false +// ignoreSkippableChartValidationError inspect the given error and returns nil if +// the error isn't important for index loading +// +// In particular, charts may introduce validations that don't impact repository indexes +// And repository indexes may be generated by older/non-complient software, which doesn't +// conform to all validations. 
+// +// this code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index.go#L402 +func ignoreSkippableChartValidationError(err error) error { + verr, ok := err.(chart.ValidationError) + if !ok { + return err } - return nil + + // https://github.com/helm/helm/issues/12748 (JFrog repository strips alias field from index) + if strings.HasPrefix(verr.Error(), "validation: more than one dependency with name or alias") { + return nil + } + + return err } diff --git a/internal/helm/repository/chart_repository_test.go b/internal/helm/repository/chart_repository_test.go index 4023345bd..1b2f1c0fb 100644 --- a/internal/helm/repository/chart_repository_test.go +++ b/internal/helm/repository/chart_repository_test.go @@ -18,28 +18,31 @@ package repository import ( "bytes" - "crypto/sha256" + "errors" "fmt" "net/url" "os" "path/filepath" + "sync" "testing" "time" - "github.com/fluxcd/source-controller/internal/cache" - "github.com/fluxcd/source-controller/internal/helm" . 
"github.com/onsi/gomega" + "github.com/opencontainers/go-digest" "helm.sh/helm/v3/pkg/chart" helmgetter "helm.sh/helm/v3/pkg/getter" "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/helm" ) var now = time.Now() const ( - testFile = "../testdata/local-index.yaml" - chartmuseumTestFile = "../testdata/chartmuseum-index.yaml" - unorderedTestFile = "../testdata/local-index-unordered.yaml" + testFile = "../testdata/local-index.yaml" + chartmuseumTestFile = "../testdata/chartmuseum-index.yaml" + chartmuseumJSONTestFile = "../testdata/chartmuseum-index.json" + unorderedTestFile = "../testdata/local-index-unordered.yaml" ) // mockGetter is a simple mocking getter.Getter implementation, returning @@ -55,6 +58,140 @@ func (g *mockGetter) Get(u string, _ ...helmgetter.Option) (*bytes.Buffer, error return bytes.NewBuffer(r), nil } +// Index load tests are derived from https://github.com/helm/helm/blob/v3.3.4/pkg/repo/index_test.go#L108 +// to ensure parity with Helm behaviour. +func TestIndexFromFile(t *testing.T) { + g := NewWithT(t) + + // Create an index file that exceeds the max index size. 
+ tmpDir := t.TempDir() + bigIndexFile := filepath.Join(tmpDir, "index.yaml") + data := make([]byte, helm.MaxIndexSize+10) + g.Expect(os.WriteFile(bigIndexFile, data, 0o640)).ToNot(HaveOccurred()) + + tests := []struct { + name string + filename string + wantErr string + }{ + { + name: "regular index file", + filename: testFile, + }, + { + name: "chartmuseum index file", + filename: chartmuseumTestFile, + }, + { + name: "chartmuseum json index file", + filename: chartmuseumJSONTestFile, + }, + { + name: "error if index size exceeds max size", + filename: bigIndexFile, + wantErr: "exceeds the maximum index file size", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + i, err := IndexFromFile(tt.filename) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + + verifyLocalIndex(t, i) + }) + } +} + +func TestIndexFromBytes(t *testing.T) { + tests := []struct { + name string + b []byte + wantName string + wantVersion string + wantDigest string + wantErr string + }{ + { + name: "index", + b: []byte(` +apiVersion: v1 +entries: + nginx: + - urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" +`), + wantName: "nginx", + wantVersion: "0.2.0", + wantDigest: "sha256:1234567890abcdef", + }, + { + name: "index without API version", + b: []byte(`entries: + nginx: + - name: nginx`), + wantErr: "no API version specified", + }, + { + name: "index with duplicate entry", + b: []byte(`apiVersion: v1 +entries: + nginx: + - name: nginx" + nginx: + - name: nginx`), + wantErr: "key \"nginx\" already set in map", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + + i, err := 
IndexFromBytes(tt.b) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(i).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(i).ToNot(BeNil()) + got, err := i.Get(tt.wantName, tt.wantVersion) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got.Digest).To(Equal(tt.wantDigest)) + }) + } +} + +func TestIndexFromBytes_Unordered(t *testing.T) { + b, err := os.ReadFile(unorderedTestFile) + if err != nil { + t.Fatal(err) + } + i, err := IndexFromBytes(b) + if err != nil { + t.Fatal(err) + } + verifyLocalIndex(t, i) +} + func TestNewChartRepository(t *testing.T) { repositoryURL := "https://example.com" providers := helmgetter.Providers{ @@ -68,7 +205,7 @@ func TestNewChartRepository(t *testing.T) { t.Run("should construct chart repository", func(t *testing.T) { g := NewWithT(t) - r, err := NewChartRepository(repositoryURL, "", providers, nil, options) + r, err := NewChartRepository(repositoryURL, "", providers, nil, options...) 
g.Expect(err).ToNot(HaveOccurred()) g.Expect(r).ToNot(BeNil()) g.Expect(r.URL).To(Equal(repositoryURL)) @@ -95,7 +232,7 @@ func TestNewChartRepository(t *testing.T) { }) } -func TestChartRepository_Get(t *testing.T) { +func TestChartRepository_GetChartVersion(t *testing.T) { g := NewWithT(t) r := newChartRepository() @@ -252,277 +389,196 @@ func TestChartRepository_DownloadChart(t *testing.T) { } } -func TestChartRepository_DownloadIndex(t *testing.T) { +func TestChartRepository_CacheIndex(t *testing.T) { g := NewWithT(t) - b, err := os.ReadFile(chartmuseumTestFile) - g.Expect(err).ToNot(HaveOccurred()) + mg := mockGetter{Response: []byte("foo")} - mg := mockGetter{Response: b} - r := &ChartRepository{ - URL: "https://example.com", - Client: &mg, - } + r := newChartRepository() + r.URL = "https://example.com" + r.Client = &mg + r.digests["key"] = "value" - buf := bytes.NewBuffer([]byte{}) - g.Expect(r.DownloadIndex(buf)).To(Succeed()) - g.Expect(buf.Bytes()).To(Equal(b)) - g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) - g.Expect(err).To(BeNil()) -} + err := r.CacheIndex() + g.Expect(err).To(Not(HaveOccurred())) -func TestChartRepository_LoadIndexFromBytes(t *testing.T) { - tests := []struct { - name string - b []byte - wantName string - wantVersion string - wantDigest string - wantErr string - }{ - { - name: "index", - b: []byte(` -apiVersion: v1 -entries: - nginx: - - urls: - - https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz - name: nginx - description: string - version: 0.2.0 - home: https://github.com/something/else - digest: "sha256:1234567890abcdef" -`), - wantName: "nginx", - wantVersion: "0.2.0", - wantDigest: "sha256:1234567890abcdef", - }, - { - name: "index without API version", - b: []byte(`entries: - nginx: - - name: nginx`), - wantErr: "no API version specified", - }, - { - name: "index with duplicate entry", - b: []byte(`apiVersion: v1 -entries: - nginx: - - name: nginx" - nginx: - - name: nginx`), - 
wantErr: "key \"nginx\" already set in map", - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - t.Parallel() + g.Expect(r.Path).ToNot(BeEmpty()) + t.Cleanup(func() { _ = os.Remove(r.Path) }) - r := newChartRepository() - err := r.LoadIndexFromBytes(tt.b) - if tt.wantErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(r.Index).To(BeNil()) - return - } + g.Expect(r.Path).To(BeARegularFile()) + b, _ := os.ReadFile(r.Path) + g.Expect(b).To(Equal(mg.Response)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(r.Index).ToNot(BeNil()) - got, err := r.Index.Get(tt.wantName, tt.wantVersion) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got.Digest).To(Equal(tt.wantDigest)) - }) - } + g.Expect(r.digests).To(BeEmpty()) } -func TestChartRepository_LoadIndexFromBytes_Unordered(t *testing.T) { - b, err := os.ReadFile(unorderedTestFile) - if err != nil { - t.Fatal(err) - } +func TestChartRepository_ToJSON(t *testing.T) { + g := NewWithT(t) + r := newChartRepository() - err = r.LoadIndexFromBytes(b) - if err != nil { - t.Fatal(err) - } - verifyLocalIndex(t, r.Index) + r.Path = chartmuseumTestFile + + _, err := r.ToJSON() + g.Expect(err).To(HaveOccurred()) + + g.Expect(r.LoadFromPath()).To(Succeed()) + b, err := r.ToJSON() + g.Expect(err).ToNot(HaveOccurred()) + + jsonBytes, err := os.ReadFile(chartmuseumJSONTestFile) + jsonBytes = bytes.TrimRight(jsonBytes, "\n") + g.Expect(err).To(Not(HaveOccurred())) + g.Expect(string(b)).To(Equal(string(jsonBytes))) } -// Index load tests are derived from https://github.com/helm/helm/blob/v3.3.4/pkg/repo/index_test.go#L108 -// to ensure parity with Helm behaviour. -func TestChartRepository_LoadIndexFromFile(t *testing.T) { +func TestChartRepository_DownloadIndex(t *testing.T) { g := NewWithT(t) - // Create an index file that exceeds the max index size. 
- tmpDir := t.TempDir() - bigIndexFile := filepath.Join(tmpDir, "index.yaml") - data := make([]byte, helm.MaxIndexSize+10) - g.Expect(os.WriteFile(bigIndexFile, data, 0o640)).ToNot(HaveOccurred()) + b, err := os.ReadFile(chartmuseumTestFile) + g.Expect(err).ToNot(HaveOccurred()) - tests := []struct { - name string - filename string - wantErr string - }{ - { - name: "regular index file", - filename: testFile, - }, - { - name: "chartmuseum index file", - filename: chartmuseumTestFile, - }, - { - name: "error if index size exceeds max size", - filename: bigIndexFile, - wantErr: "size of index 'index.yaml' exceeds", - }, + mg := mockGetter{Response: b} + r := &ChartRepository{ + URL: "https://example.com", + Client: &mg, + RWMutex: &sync.RWMutex{}, } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) + t.Run("download index", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, helm.MaxIndexSize)).To(Succeed()) + g.Expect(buf.Bytes()).To(Equal(b)) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + g.Expect(err).To(BeNil()) + }) - r := newChartRepository() - err := r.LoadFromFile(tt.filename) - if tt.wantErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - return - } + t.Run("download index size error", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, int64(len(b)-1))).To(HaveOccurred()) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + }) +} - g.Expect(err).ToNot(HaveOccurred()) +func TestChartRepository_StrategicallyLoadIndex(t *testing.T) { + t.Run("loads from path", func(t *testing.T) { + g := NewWithT(t) - verifyLocalIndex(t, r.Index) + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = i + + err := r.StrategicallyLoadIndex() + g.Expect(err).To(Succeed()) + 
g.Expect(r.Index).ToNot(BeNil()) + }) + + t.Run("loads from client", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Client = &mockGetter{ + Response: []byte(`apiVersion: v1`), + } + t.Cleanup(func() { + _ = os.Remove(r.Path) }) - } + + err := r.StrategicallyLoadIndex() + g.Expect(err).To(Succeed()) + g.Expect(r.Path).ToNot(BeEmpty()) + g.Expect(r.Index).ToNot(BeNil()) + }) + + t.Run("skips if index is already loaded", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Index = repo.NewIndexFile() + + g.Expect(r.StrategicallyLoadIndex()).To(Succeed()) + }) } -func TestChartRepository_CacheIndex(t *testing.T) { - g := NewWithT(t) +func TestChartRepository_LoadFromPath(t *testing.T) { + t.Run("loads index", func(t *testing.T) { + g := NewWithT(t) - mg := mockGetter{Response: []byte("foo")} - expectSum := fmt.Sprintf("%x", sha256.Sum256(mg.Response)) + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) - r := newChartRepository() - r.URL = "https://example.com" - r.Client = &mg + r := newChartRepository() + r.Path = i - sum, err := r.CacheIndex() - g.Expect(err).To(Not(HaveOccurred())) + g.Expect(r.LoadFromPath()).To(Succeed()) + g.Expect(r.Index).ToNot(BeNil()) + }) - g.Expect(r.CachePath).ToNot(BeEmpty()) - defer os.RemoveAll(r.CachePath) - g.Expect(r.CachePath).To(BeARegularFile()) - b, _ := os.ReadFile(r.CachePath) + t.Run("no cache path", func(t *testing.T) { + g := NewWithT(t) - g.Expect(b).To(Equal(mg.Response)) - g.Expect(sum).To(BeEquivalentTo(expectSum)) + err := newChartRepository().LoadFromPath() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cache path")) + }) + + t.Run("index load error", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Path = filepath.Join(t.TempDir(), "index.yaml") + + err := r.LoadFromPath() + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, 
os.ErrNotExist)).To(BeTrue()) + }) } -func TestChartRepository_StrategicallyLoadIndex(t *testing.T) { - g := NewWithT(t) +func TestChartRepository_Digest(t *testing.T) { + t.Run("with algorithm", func(t *testing.T) { + g := NewWithT(t) - r := newChartRepository() - r.Index = repo.NewIndexFile() - g.Expect(r.StrategicallyLoadIndex()).To(Succeed()) - g.Expect(r.CachePath).To(BeEmpty()) - g.Expect(r.Cached).To(BeFalse()) + p := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(repo.NewIndexFile().WriteFile(p, 0o600)).To(Succeed()) - r.Index = nil - r.CachePath = "/invalid/cache/index/path.yaml" - err := r.StrategicallyLoadIndex() - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring("/invalid/cache/index/path.yaml: no such file or directory")) - g.Expect(r.Cached).To(BeFalse()) + r := newChartRepository() + r.Path = p - r.CachePath = "" - r.Client = &mockGetter{} - err = r.StrategicallyLoadIndex() - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring("no API version specified")) - g.Expect(r.Cached).To(BeTrue()) - g.Expect(r.RemoveCache()).To(Succeed()) -} + for _, algo := range []digest.Algorithm{digest.SHA256, digest.SHA512} { + t.Run(algo.String(), func(t *testing.T) { + g := NewWithT(t) -func TestChartRepository_CacheIndexInMemory(t *testing.T) { - g := NewWithT(t) + d := r.Digest(algo) + g.Expect(d).ToNot(BeEmpty()) + g.Expect(d.Algorithm()).To(Equal(algo)) + g.Expect(r.digests[algo]).To(Equal(d)) + }) + } + }) - interval, _ := time.ParseDuration("5s") - memCache := cache.New(1, interval) - indexPath := "/multi-tenent-safe/mock/index.yaml" - r := newChartRepository() - r.Index = repo.NewIndexFile() - indexFile := *r.Index - g.Expect( - indexFile.MustAdd( - &chart.Metadata{ - Name: "grafana", - Version: "6.17.4", - }, - "grafana-6.17.4.tgz", - "http://example.com/charts", - "sha256:1234567890abc", - )).To(Succeed()) - indexFile.WriteFile(indexPath, 0o640) - ttl, _ := time.ParseDuration("1m") - 
r.SetMemCache(indexPath, memCache, ttl, func(event string) { - fmt.Println(event) + t.Run("without path", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.Digest(digest.SHA256)).To(BeEmpty()) }) - r.CacheIndexInMemory() - _, cacheHit := r.IndexCache.Get(indexPath) - g.Expect(cacheHit).To(Equal(true)) - r.Unload() - g.Expect(r.Index).To(BeNil()) - g.Expect(r.StrategicallyLoadIndex()).To(Succeed()) - g.Expect(r.Index.Entries["grafana"][0].Digest).To(Equal("sha256:1234567890abc")) -} -func TestChartRepository_LoadFromCache(t *testing.T) { - tests := []struct { - name string - cachePath string - wantErr string - }{ - { - name: "cache path", - cachePath: chartmuseumTestFile, - }, - { - name: "invalid cache path", - cachePath: "invalid", - wantErr: "stat invalid: no such file", - }, - { - name: "no cache path", - cachePath: "", - wantErr: "no cache path set", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) + t.Run("from cache", func(t *testing.T) { + g := NewWithT(t) - r := newChartRepository() - r.CachePath = tt.cachePath - err := r.LoadFromCache() - if tt.wantErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(r.Index).To(BeNil()) - return - } + algo := digest.SHA256 + expect := digest.Digest("sha256:fake") - g.Expect(err).ToNot(HaveOccurred()) - verifyLocalIndex(t, r.Index) - }) - } + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = i + r.digests[algo] = expect + + g.Expect(r.Digest(algo)).To(Equal(expect)) + }) } func TestChartRepository_HasIndex(t *testing.T) { @@ -534,30 +590,89 @@ func TestChartRepository_HasIndex(t *testing.T) { g.Expect(r.HasIndex()).To(BeTrue()) } -func TestChartRepository_HasCacheFile(t *testing.T) { +func TestChartRepository_HasFile(t *testing.T) { g := NewWithT(t) r := newChartRepository() 
- g.Expect(r.HasCacheFile()).To(BeFalse()) - r.CachePath = "foo" - g.Expect(r.HasCacheFile()).To(BeTrue()) + g.Expect(r.HasFile()).To(BeFalse()) + + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + r.Path = i + g.Expect(r.HasFile()).To(BeTrue()) } -func TestChartRepository_UnloadIndex(t *testing.T) { +func TestChartRepository_Clear(t *testing.T) { + t.Run("without index", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.Clear()).To(Succeed()) + }) + + t.Run("with index", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Index = repo.NewIndexFile() + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Index).To(BeNil()) + }) + + t.Run("with index and cached path", func(t *testing.T) { + g := NewWithT(t) + + f, err := os.CreateTemp(t.TempDir(), "index-*.yaml") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).To(Succeed()) + + r := newChartRepository() + r.Path = f.Name() + r.Index = repo.NewIndexFile() + r.digests["key"] = "value" + r.cached = true + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Index).To(BeNil()) + g.Expect(r.Path).To(BeEmpty()) + g.Expect(r.digests).To(BeEmpty()) + g.Expect(r.cached).To(BeFalse()) + }) + + t.Run("with path", func(t *testing.T) { + g := NewWithT(t) + + f, err := os.CreateTemp(t.TempDir(), "index-*.yaml") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).To(Succeed()) + + r := newChartRepository() + r.Path = f.Name() + r.digests["key"] = "value" + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Path).ToNot(BeEmpty()) + g.Expect(r.Path).To(BeARegularFile()) + g.Expect(r.digests).To(BeEmpty()) + }) +} + +func TestChartRepository_Invalidate(t *testing.T) { g := NewWithT(t) r := newChartRepository() - g.Expect(r.HasIndex()).To(BeFalse()) - r.Index = repo.NewIndexFile() - r.Unload() - g.Expect(r.Index).To(BeNil()) + r.digests["key"] = "value" + + r.Invalidate() + 
g.Expect(r.digests).To(BeEmpty()) } func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g := NewWithT(t) g.Expect(i.Entries).ToNot(BeNil()) - g.Expect(i.Entries).To(HaveLen(3), "expected 3 entries in index file") + g.Expect(i.Entries).To(HaveLen(4), "expected 4 entries in index file") alpine, ok := i.Entries["alpine"] g.Expect(ok).To(BeTrue(), "expected 'alpine' entry to exist") @@ -567,6 +682,10 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g.Expect(ok).To(BeTrue(), "expected 'nginx' entry to exist") g.Expect(nginx).To(HaveLen(2), "'nginx' should have 2 entries") + broken, ok := i.Entries["xChartWithDuplicateDependenciesAndMissingAlias"] + g.Expect(ok).To(BeTrue(), "expected 'xChartWithDuplicateDependenciesAndMissingAlias' entry to exist") + g.Expect(broken).To(HaveLen(1), "'xChartWithDuplicateDependenciesAndMissingAlias' should have 1 entries") + expects := []*repo.ChartVersion{ { Metadata: &chart.Metadata{ @@ -608,8 +727,24 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { }, Digest: "sha256:1234567890abcdef", }, + { + Metadata: &chart.Metadata{ + Name: "xChartWithDuplicateDependenciesAndMissingAlias", + Description: "string", + Version: "1.2.3", + Keywords: []string{"broken", "still accepted"}, + Home: "https://example.com/something", + Dependencies: []*chart.Dependency{ + {Name: "kube-rbac-proxy", Version: "0.9.1"}, + }, + }, + URLs: []string{ + "https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, } - tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1]} + tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1], broken[0]} for i, tt := range tests { expect := expects[i] @@ -620,29 +755,129 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g.Expect(tt.Home).To(Equal(expect.Home)) g.Expect(tt.URLs).To(ContainElements(expect.URLs)) g.Expect(tt.Keywords).To(ContainElements(expect.Keywords)) + 
g.Expect(tt.Dependencies).To(ContainElements(expect.Dependencies)) } } -func TestChartRepository_RemoveCache(t *testing.T) { - g := NewWithT(t) +// This code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index_test.go#L601 +// and refers to: https://github.com/helm/helm/issues/12748 +func TestIgnoreSkippableChartValidationError(t *testing.T) { + type TestCase struct { + Input error + ErrorSkipped bool + } + testCases := map[string]TestCase{ + "nil": { + Input: nil, + }, + "generic_error": { + Input: fmt.Errorf("foo"), + }, + "non_skipped_validation_error": { + Input: chart.ValidationError("chart.metadata.type must be application or library"), + }, + "skipped_validation_error": { + Input: chart.ValidationErrorf("more than one dependency with name or alias %q", "foo"), + ErrorSkipped: true, + }, + } - tmpFile, err := os.CreateTemp("", "remove-cache-") - g.Expect(err).ToNot(HaveOccurred()) - defer os.Remove(tmpFile.Name()) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := ignoreSkippableChartValidationError(tc.Input) - r := newChartRepository() - r.CachePath = tmpFile.Name() - r.Cached = true + if tc.Input == nil { + if result != nil { + t.Error("expected nil result for nil input") + } + return + } - g.Expect(r.RemoveCache()).To(Succeed()) - g.Expect(r.CachePath).To(BeEmpty()) - g.Expect(r.Cached).To(BeFalse()) - g.Expect(tmpFile.Name()).ToNot(BeAnExistingFile()) + if tc.ErrorSkipped { + if result != nil { + t.Error("expected nil result for skipped error") + } + return + } + + if tc.Input != result { + t.Error("expected the result equal to input") + } + + }) + } +} - r.CachePath = tmpFile.Name() - r.Cached = true +var indexWithFirstVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + version: 0..1.0 + description: string + home: 
https://github.com/something + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" +` +var indexWithLastVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + version: 0..1.0 + description: string + home: https://github.com/something + digest: "sha256:1234567890abcdef" +` - g.Expect(r.RemoveCache()).To(Succeed()) - g.Expect(r.CachePath).To(BeEmpty()) - g.Expect(r.Cached).To(BeFalse()) +func TestIndexFromBytes_InvalidEntries(t *testing.T) { + tests := []struct { + source string + data string + }{ + { + source: "indexWithFirstVersionInvalid", + data: indexWithFirstVersionInvalid, + }, + { + source: "indexWithLastVersionInvalid", + data: indexWithLastVersionInvalid, + }, + } + for _, tc := range tests { + t.Run(tc.source, func(t *testing.T) { + idx, err := IndexFromBytes([]byte(tc.data)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + cvs := idx.Entries["nginx"] + if len(cvs) == 0 { + t.Error("expected one chart version not to be filtered out") + } + for _, v := range cvs { + if v.Version == "0..1.0" { + t.Error("malformed version was not filtered out") + } + } + }) + } } diff --git a/internal/helm/repository/errors.go b/internal/helm/repository/errors.go new file mode 100644 index 000000000..d8d57059e --- /dev/null +++ b/internal/helm/repository/errors.go @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this 
file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repository
+
+// ErrReference indicates an invalid chart reference.
+type ErrReference struct {
+	Err error
+}
+
+// Error implements the error interface.
+func (er *ErrReference) Error() string {
+	return er.Err.Error()
+}
+
+// Unwrap returns the underlying error.
+func (er *ErrReference) Unwrap() error {
+	return er.Err
+}
+
+// ErrExternal is a generic error for errors related to external API calls.
+type ErrExternal struct {
+	Err error
+}
+
+// Error implements the error interface.
+func (ee *ErrExternal) Error() string {
+	return ee.Err.Error()
+}
+
+// Unwrap returns the underlying error.
+func (ee *ErrExternal) Unwrap() error { + return ee.Err +} diff --git a/internal/helm/repository/oci_chart_repository.go b/internal/helm/repository/oci_chart_repository.go index 417a52818..2bed964a2 100644 --- a/internal/helm/repository/oci_chart_repository.go +++ b/internal/helm/repository/oci_chart_repository.go @@ -18,7 +18,9 @@ package repository import ( "bytes" + "context" "crypto/tls" + "errors" "fmt" "net/url" "os" @@ -32,8 +34,11 @@ import ( "helm.sh/helm/v3/pkg/repo" "github.com/Masterminds/semver/v3" + "github.com/google/go-containerregistry/pkg/name" + + "github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/pkg/version" - "github.com/fluxcd/source-controller/internal/transport" + "github.com/fluxcd/source-controller/internal/oci" ) // RegistryClient is an interface for interacting with OCI registries @@ -61,14 +66,39 @@ type OCIChartRepository struct { // RegistryClient is a client to use while downloading tags or charts from a registry. RegistryClient RegistryClient + // credentialsFile is a temporary credentials file to use while downloading tags or charts from a registry. credentialsFile string + + // certificatesStore is a temporary store to use while downloading tags or charts from a registry. + certificatesStore string + + // verifiers is a list of verifiers to use when verifying a chart. + verifiers []oci.Verifier + + // insecureHTTP indicates that the chart is hosted on an insecure HTTP registry. + insecureHTTP bool } // OCIChartRepositoryOption is a function that can be passed to NewOCIChartRepository // to configure an OCIChartRepository. 
type OCIChartRepositoryOption func(*OCIChartRepository) error +// WithVerifiers returns a ChartRepositoryOption that will set the chart verifiers +func WithVerifiers(verifiers []oci.Verifier) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.verifiers = verifiers + return nil + } +} + +func WithInsecureHTTP() OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.insecureHTTP = true + return nil + } +} + // WithOCIRegistryClient returns a ChartRepositoryOption that will set the registry client func WithOCIRegistryClient(client RegistryClient) OCIChartRepositoryOption { return func(r *OCIChartRepository) error { @@ -105,6 +135,14 @@ func WithCredentialsFile(credentialsFile string) OCIChartRepositoryOption { } } +// WithCertificatesStore returns a ChartRepositoryOption that will set the certificates store +func WithCertificatesStore(store string) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.certificatesStore = store + return nil + } +} + // NewOCIChartRepository constructs and returns a new ChartRepository with // the ChartRepository.Client configured to the getter.Getter for the // repository URL scheme. It returns an error on URL parsing failures. @@ -131,7 +169,14 @@ func NewOCIChartRepository(repositoryURL string, chartRepoOpts ...OCIChartReposi // stable version will be returned and prerelease versions will be ignored. 
// adapted from https://github.com/helm/helm/blob/49819b4ef782e80b0c7f78c30bd76b51ebb56dc8/pkg/downloader/chart_downloader.go#L162 func (r *OCIChartRepository) GetChartVersion(name, ver string) (*repo.ChartVersion, error) { + cv, err := r.getChartVersion(name, ver) + if err != nil { + return nil, &ErrExternal{Err: err} + } + return cv, nil +} +func (r *OCIChartRepository) getChartVersion(name, ver string) (*repo.ChartVersion, error) { cpURL := r.URL cpURL.Path = path.Join(cpURL.Path, name) @@ -209,7 +254,11 @@ func (r *OCIChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buf defer transport.Release(t) // trim the oci scheme prefix if needed - return r.Client.Get(strings.TrimPrefix(u.String(), fmt.Sprintf("%s://", registry.OCIScheme)), clientOpts...) + b, err := r.Client.Get(strings.TrimPrefix(u.String(), fmt.Sprintf("%s://", registry.OCIScheme)), clientOpts...) + if err != nil { + return nil, fmt.Errorf("failed to get '%s': %w", ref, err) + } + return b, nil } // Login attempts to login to the OCI registry. @@ -239,14 +288,24 @@ func (r *OCIChartRepository) HasCredentials() bool { // Clear deletes the OCI registry credentials file. func (r *OCIChartRepository) Clear() error { + var errs error // clean the credentials file if it exists if r.credentialsFile != "" { if err := os.Remove(r.credentialsFile); err != nil { - return err + errs = errors.Join(errs, err) } } r.credentialsFile = "" - return nil + + // clean the certificates store if it exists + if r.certificatesStore != "" { + if err := os.RemoveAll(r.certificatesStore); err != nil { + errs = errors.Join(errs, err) + } + } + r.certificatesStore = "" + + return errs } // getLastMatchingVersionOrConstraint returns the last version that matches the given version string. @@ -296,3 +355,47 @@ func getLastMatchingVersionOrConstraint(cvs []string, ver string) (string, error return matchingVersions[0].Original(), nil } + +// VerifyChart verifies the chart against a signature. 
+// Supports signature verification using either cosign or notation providers. +// If no signature is provided, when cosign is used, a keyless verification is performed. +// The verification result is returned as a VerificationResult and any error encountered. +func (r *OCIChartRepository) VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) { + if len(r.verifiers) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("no verifiers available") + } + + if len(chart.URLs) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) + } + + var nameOpts []name.Option + if r.insecureHTTP { + nameOpts = append(nameOpts, name.Insecure) + } + + ref, err := name.ParseReference(strings.TrimPrefix(chart.URLs[0], fmt.Sprintf("%s://", registry.OCIScheme)), nameOpts...) + if err != nil { + return oci.VerificationResultFailed, fmt.Errorf("invalid chart reference: %s", err) + } + + verificationResult := oci.VerificationResultFailed + + // verify the chart + for _, verifier := range r.verifiers { + result, err := verifier.Verify(ctx, ref) + if err != nil { + return result, fmt.Errorf("failed to verify %s: %w", chart.URLs[0], err) + } + if result == oci.VerificationResultSuccess { + return result, nil + } + verificationResult = result + } + + if verificationResult == oci.VerificationResultIgnored { + return verificationResult, nil + } + + return oci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref.Name()) +} diff --git a/internal/helm/repository/oci_chart_repository_test.go b/internal/helm/repository/oci_chart_repository_test.go index 1ef12a860..504d44e3e 100644 --- a/internal/helm/repository/oci_chart_repository_test.go +++ b/internal/helm/repository/oci_chart_repository_test.go @@ -210,7 +210,6 @@ func TestOCIChartRepository_Get(t *testing.T) { } func TestOCIChartRepository_DownloadChart(t *testing.T) { - client := &mockRegistryClient{} testCases := 
[]struct { name string url string @@ -225,7 +224,7 @@ func TestOCIChartRepository_DownloadChart(t *testing.T) { Metadata: &chart.Metadata{Name: "chart"}, URLs: []string{"oci://localhost:5000/my_repo/podinfo:1.0.0"}, }, - expected: "oci://localhost:5000/my_repo/podinfo:1.0.0", + expected: "localhost:5000/my_repo/podinfo:1.0.0", }, { name: "no chart URL", @@ -245,19 +244,21 @@ func TestOCIChartRepository_DownloadChart(t *testing.T) { } for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) t.Parallel() - mg := OCIMockGetter{} + + g := NewWithT(t) + u, err := url.Parse(tc.url) g.Expect(err).ToNot(HaveOccurred()) + + mg := OCIMockGetter{} r := OCIChartRepository{ Client: &mg, URL: *u, } - r.Client = &mg - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(r).ToNot(BeNil()) + res, err := r.DownloadChart(tc.chartVersion) if tc.expectedErr { g.Expect(err).To(HaveOccurred()) @@ -265,7 +266,7 @@ func TestOCIChartRepository_DownloadChart(t *testing.T) { } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(client.LastCalledURL).To(Equal(tc.expected)) + g.Expect(mg.LastCalledURL).To(Equal(tc.expected)) g.Expect(res).ToNot(BeNil()) g.Expect(err).ToNot(HaveOccurred()) }) diff --git a/internal/helm/repository/repository.go b/internal/helm/repository/repository.go index 4c8cb7ff8..6cee5f658 100644 --- a/internal/helm/repository/repository.go +++ b/internal/helm/repository/repository.go @@ -18,8 +18,11 @@ package repository import ( "bytes" + "context" "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/oci" ) // Downloader is used to download a chart from a remote Helm repository or OCI Helm repository. @@ -29,6 +32,8 @@ type Downloader interface { GetChartVersion(name, version string) (*repo.ChartVersion, error) // DownloadChart downloads a chart from the remote Helm repository or OCI Helm repository. 
DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) + // VerifyChart verifies the chart against a signature. + VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) // Clear removes all temporary files created by the downloader, caching the files if the cache is configured, // and calling garbage collector to remove unused files. Clear() error diff --git a/internal/helm/repository/utils.go b/internal/helm/repository/utils.go index 5d5ab2548..b784dec0d 100644 --- a/internal/helm/repository/utils.go +++ b/internal/helm/repository/utils.go @@ -18,6 +18,7 @@ package repository import ( "fmt" + "net/url" "strings" helmreg "helm.sh/helm/v3/pkg/registry" @@ -35,17 +36,28 @@ var ( ) // NormalizeURL normalizes a ChartRepository URL by its scheme. -func NormalizeURL(repositoryURL string) string { +func NormalizeURL(repositoryURL string) (string, error) { if repositoryURL == "" { - return "" + return "", nil } - - if strings.Contains(repositoryURL, helmreg.OCIScheme) { - return strings.TrimRight(repositoryURL, "/") + u, err := url.Parse(repositoryURL) + if err != nil { + return "", err } - return strings.TrimRight(repositoryURL, "/") + "/" + if u.Scheme == helmreg.OCIScheme { + u.Path = strings.TrimRight(u.Path, "/") + // we perform the same operation on u.RawPath so that it will be a valid encoding + // of u.Path. This allows u.EscapedPath() (which is used in computing u.String()) to return + // the correct value when the path is url encoded. 
+ // ref: https://pkg.go.dev/net/url#URL.EscapedPath + u.RawPath = strings.TrimRight(u.RawPath, "/") + return u.String(), nil + } + u.Path = strings.TrimRight(u.Path, "/") + "/" + u.RawPath = strings.TrimRight(u.RawPath, "/") + "/" + return u.String(), nil } // ValidateDepURL returns an error if the given depended repository URL declaration is not supported diff --git a/internal/helm/repository/utils_test.go b/internal/helm/repository/utils_test.go index 3ee77606d..a1fa2dcaa 100644 --- a/internal/helm/repository/utils_test.go +++ b/internal/helm/repository/utils_test.go @@ -24,9 +24,10 @@ import ( func TestNormalizeURL(t *testing.T) { tests := []struct { - name string - url string - want string + name string + url string + want string + wantErr bool }{ { name: "with slash", @@ -43,11 +44,6 @@ func TestNormalizeURL(t *testing.T) { url: "http://example.com//", want: "http://example.com/", }, - { - name: "empty", - url: "", - want: "", - }, { name: "oci with slash", url: "oci://example.com/", @@ -58,12 +54,48 @@ func TestNormalizeURL(t *testing.T) { url: "oci://example.com//", want: "oci://example.com", }, + { + name: "url with query", + url: "http://example.com?st=pr", + want: "http://example.com/?st=pr", + }, + { + name: "url with slash and query", + url: "http://example.com/?st=pr", + want: "http://example.com/?st=pr", + }, + { + name: "url with encoded path", + url: "http://example.com/next%2Fpath", + want: "http://example.com/next%2Fpath/", + }, + { + name: "url with encoded path and slash", + url: "http://example.com/next%2Fpath/", + want: "http://example.com/next%2Fpath/", + }, + { + name: "empty url", + url: "", + want: "", + }, + { + name: "bad url", + url: "://badurl.", + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := NormalizeURL(tt.url) + got, err := NormalizeURL(tt.url) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).To(Not(HaveOccurred())) 
g.Expect(got).To(Equal(tt.want)) }) } diff --git a/internal/helm/testdata/chartmuseum-index.json b/internal/helm/testdata/chartmuseum-index.json new file mode 100644 index 000000000..15ba3e704 --- /dev/null +++ b/internal/helm/testdata/chartmuseum-index.json @@ -0,0 +1,112 @@ +{ + "serverInfo": { + "contextPath": "/v1/helm" + }, + "apiVersion": "v1", + "generated": "0001-01-01T00:00:00Z", + "entries": { + "alpine": [ + { + "name": "alpine", + "home": "https://github.com/something", + "version": "1.0.0", + "description": "string", + "keywords": [ + "linux", + "alpine", + "small", + "sumtin" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/alpine-1.0.0.tgz", + "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + "chartWithNoURL": [ + { + "name": "chartWithNoURL", + "home": "https://github.com/something", + "version": "1.0.0", + "description": "string", + "keywords": [ + "small", + "sumtin" + ], + "apiVersion": "v1", + "urls": null, + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + "nginx": [ + { + "name": "nginx", + "home": "https://github.com/something/else", + "version": "0.2.0", + "description": "string", + "keywords": [ + "popular", + "web server", + "proxy" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + }, + { + "name": "nginx", + "home": "https://github.com/something", + "version": "0.1.0", + "description": "string", + "keywords": [ + "popular", + "web server", + "proxy" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.1.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + 
"xChartWithDuplicateDependenciesAndMissingAlias": [ + { + "name": "xChartWithDuplicateDependenciesAndMissingAlias", + "home": "https://example.com/something", + "version": "1.2.3", + "description": "string", + "keywords": [ + "broken", + "still accepted" + ], + "apiVersion": "v1", + "dependencies": [ + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + }, + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + } + ], + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ] + } +} diff --git a/internal/helm/testdata/chartmuseum-index.yaml b/internal/helm/testdata/chartmuseum-index.yaml index 3077596f4..ab00c1807 100644 --- a/internal/helm/testdata/chartmuseum-index.yaml +++ b/internal/helm/testdata/chartmuseum-index.yaml @@ -48,3 +48,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz b/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz new file mode 100644 index 000000000..1f6675d5c Binary files /dev/null and b/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz differ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore @@ -0,0 +1,22 @@ +# Patterns to 
ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock new file mode 100644 index 000000000..83401ac65 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.17.4 +digest: sha256:1e41c97e27347f433ff0212bf52c344bc82dd435f70129d15e96cd2c8fcc32bb +generated: "2021-11-02T01:25:59.624290788+01:00" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml new file mode 100644 index 000000000..1e32b80ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +name: helmchartwithdeps +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 + +dependencies: + - name: helmchart + version: "0.1.0" + - name: helmchart + alias: aliased + version: "0.1.0" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml new file mode 100644 index 000000000..46eaf150b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: helmchart +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt new file mode 100644 index 000000000..741a77d8e --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchart.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchart.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchart.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl new file mode 100644 index 000000000..f6431fcb2 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "helmchart.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "helmchart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchart.labels" -}} +helm.sh/chart: {{ include "helmchart.chart" . }} +{{ include "helmchart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchart.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml new file mode 100644 index 000000000..daa9f8e56 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchart.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchart.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml new file mode 100644 index 000000000..c2069e9c8 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchart.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml new file mode 100644 index 000000000..12e16ef71 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchart.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml new file mode 100644 index 000000000..da3512648 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchart.serviceAccountName" . }} + labels: +{{ include "helmchart.labels" . 
| nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml new file mode 100644 index 000000000..11b0b1a96 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchart.fullname" . }}-test-connection" + labels: +{{ include "helmchart.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchart.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml new file mode 100644 index 000000000..5ef7832ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml @@ -0,0 +1 @@ +replicaCount: 2 diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml new file mode 100644 index 000000000..40e7aa0b6 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml @@ -0,0 +1,66 @@ +# Default values for helmchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt new file mode 100644 index 000000000..105423d28 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchartwithdeps.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchartwithdeps.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchartwithdeps.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchartwithdeps.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl new file mode 100644 index 000000000..a718f8b32 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchartwithdeps.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "helmchartwithdeps.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "helmchartwithdeps.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchartwithdeps.labels" -}} +helm.sh/chart: {{ include "helmchartwithdeps.chart" . }} +{{ include "helmchartwithdeps.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchartwithdeps.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchartwithdeps.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchartwithdeps.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchartwithdeps.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml new file mode 100644 index 000000000..08f62c740 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchartwithdeps.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml new file mode 100644 index 000000000..6c1b03148 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchartwithdeps.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml new file mode 100644 index 000000000..2c270c67b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml new file mode 100644 index 000000000..2eec29c55 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchartwithdeps.serviceAccountName" . }} + labels: +{{ include "helmchartwithdeps.labels" . | nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml new file mode 100644 index 000000000..bbcd09201 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchartwithdeps.fullname" . }}-test-connection" + labels: +{{ include "helmchartwithdeps.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchartwithdeps.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml new file mode 100644 index 000000000..8213f28c1 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml @@ -0,0 +1,66 @@ +# Default values for helmchartwithdeps. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/local-index-unordered.yaml b/internal/helm/testdata/local-index-unordered.yaml index 7482baaae..91ad62f1e 100644 --- a/internal/helm/testdata/local-index-unordered.yaml +++ b/internal/helm/testdata/local-index-unordered.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/local-index.yaml b/internal/helm/testdata/local-index.yaml index e680d2a3e..56c0ac2c3 100644 --- a/internal/helm/testdata/local-index.yaml +++ b/internal/helm/testdata/local-index.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/index/digest.go b/internal/index/digest.go new file mode 100644 index 000000000..1f7bd642f --- /dev/null +++ b/internal/index/digest.go @@ -0,0 +1,221 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/opencontainers/go-digest" +) + +// Digester is a simple string key value index that can be used to calculate +// digests of the index. The digests are cached, and only recalculated if the +// index has changed. +type Digester struct { + // index is the map of keys and their associated values. + index map[string]string + + // digests is a cache of digests calculated for the index. + digests map[digest.Algorithm]digest.Digest + + mu sync.RWMutex +} + +// DigesterOption is a functional option for configuring a digester. +type DigesterOption func(*Digester) + +// WithIndex returns a DigesterOption that sets the index to the provided map. +// The map is copied, so any changes to the map after the option is applied +// will not be reflected in the index. +func WithIndex(i map[string]string) DigesterOption { + return func(d *Digester) { + if i != nil { + d.mu.Lock() + defer d.mu.Unlock() + + if d.index == nil { + d.index = make(map[string]string, len(i)) + } + for k, v := range i { + d.index[k] = v + } + d.reset() + } + } +} + +// NewDigester returns a new digest index with an empty initialized index. +func NewDigester(opts ...DigesterOption) *Digester { + d := &Digester{ + digests: make(map[digest.Algorithm]digest.Digest, 0), + index: make(map[string]string, 0), + } + for _, opt := range opts { + opt(d) + } + return d +} + +// Add adds the key and digest to the index. 
+func (i *Digester) Add(key, value string) { + i.mu.Lock() + defer i.mu.Unlock() + + i.index[key] = value + i.reset() +} + +// Delete removes the key from the index. +func (i *Digester) Delete(key string) { + i.mu.Lock() + defer i.mu.Unlock() + + if _, ok := i.index[key]; ok { + delete(i.index, key) + i.reset() + } +} + +// Get returns the digest for the key, or an empty digest if the key is not +// found. +func (i *Digester) Get(key string) string { + i.mu.RLock() + defer i.mu.RUnlock() + + return i.index[key] +} + +// Has returns true if the index contains the key. +func (i *Digester) Has(key string) bool { + i.mu.RLock() + defer i.mu.RUnlock() + + _, ok := i.index[key] + return ok +} + +// Index returns a copy of the index. +func (i *Digester) Index() map[string]string { + i.mu.RLock() + defer i.mu.RUnlock() + + index := make(map[string]string, len(i.index)) + for k, v := range i.index { + index[k] = v + } + return index +} + +// Len returns the number of keys in the index. +func (i *Digester) Len() int { + i.mu.RLock() + defer i.mu.RUnlock() + return len(i.index) +} + +// String returns a string representation of the index. The keys are stable +// sorted, and the string representation of the key/value pairs is written, +// each pair on a newline with a space between them. +func (i *Digester) String() string { + i.mu.RLock() + defer i.mu.RUnlock() + + keys := i.sortedKeys() + var b strings.Builder + for _, k := range keys { + b.Grow(len(k) + len(i.index[k]) + 2) + writeLine(&b, k, i.index[k]) + } + return b.String() +} + +// WriteTo writes the index to the writer. The keys are stable sorted, and the +// string representation of the key/value pairs is written, each pair on a +// newline with a space between them. 
+func (i *Digester) WriteTo(w io.Writer) (int64, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + keys := i.sortedKeys() + var n int64 + for _, k := range keys { + nn, err := writeLine(w, k, i.index[k]) + n += int64(nn) + if err != nil { + return n, err + } + } + return n, nil +} + +// Digest returns the digest of the index using the provided algorithm. +// If the index has not changed since the last call to Digest, the cached +// digest is returned. +// For verifying the index against a known digest, use Verify. +func (i *Digester) Digest(a digest.Algorithm) digest.Digest { + i.mu.Lock() + defer i.mu.Unlock() + + if _, ok := i.digests[a]; !ok { + digester := a.Digester() + keys := i.sortedKeys() + for _, k := range keys { + _, _ = writeLine(digester.Hash(), k, i.index[k]) + } + i.digests[a] = digester.Digest() + } + + return i.digests[a] +} + +// Verify returns true if the index matches the provided digest. +func (i *Digester) Verify(d digest.Digest) bool { + i.mu.RLock() + defer i.mu.RUnlock() + + verifier := d.Verifier() + keys := i.sortedKeys() + for _, k := range keys { + _, _ = writeLine(verifier, k, i.index[k]) + } + return verifier.Verified() +} + +// sortedKeys returns a slice of the keys in the index, sorted alphabetically. +func (i *Digester) sortedKeys() []string { + keys := make([]string, 0, len(i.index)) + for k := range i.index { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// reset clears the digests cache. +func (i *Digester) reset() { + i.digests = make(map[digest.Algorithm]digest.Digest, 0) +} + +// writeLine writes the key and digest to the writer, separated by a space and +// terminating with a newline. 
+func writeLine(w io.Writer, key, value string) (int, error) { + return fmt.Fprintf(w, "%s %s\n", key, value) +} diff --git a/internal/index/digest_test.go b/internal/index/digest_test.go new file mode 100644 index 000000000..531bb9329 --- /dev/null +++ b/internal/index/digest_test.go @@ -0,0 +1,381 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "bytes" + "errors" + "testing" + + . "github.com/onsi/gomega" + "github.com/opencontainers/go-digest" +) + +func TestWithIndex(t *testing.T) { + t.Run("sets the index", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{"foo": "bar"} + d := &Digester{} + WithIndex(i)(d) + + g.Expect(d.index).To(Equal(i)) + }) + + t.Run("resets the digests", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{"foo": "bar"} + d := &Digester{ + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + WithIndex(i)(d) + + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("handles nil index", func(t *testing.T) { + g := NewWithT(t) + d := &Digester{} + WithIndex(nil)(d) + g.Expect(d.index).To(BeNil()) + }) +} + +func TestNewDigester(t *testing.T) { + t.Run("default", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + + g.Expect(d).ToNot(BeNil()) + g.Expect(d.index).ToNot(BeNil()) + g.Expect(d.digests).ToNot(BeNil()) + }) + + t.Run("with index", func(t *testing.T) { + g := NewWithT(t) + + i := 
map[string]string{"foo": "bar"} + d := NewDigester(WithIndex(i)) + + g.Expect(d).ToNot(BeNil()) + g.Expect(d.index).To(Equal(i)) + g.Expect(d.digests).ToNot(BeNil()) + }) +} + +func TestDigester_Add(t *testing.T) { + t.Run("adds", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.index).To(HaveKeyWithValue("foo", "bar")) + }) + + t.Run("overwrites", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + d.Add("foo", "baz") + + g.Expect(d.index).To(HaveKeyWithValue("foo", "baz")) + }) + + t.Run("resets digests", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{}, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + d.Add("foo", "bar") + + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("adds empty key and value", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Add("", "") + g.Expect(d.index).To(HaveKeyWithValue("", "")) + }) +} + +func TestDigester_Delete(t *testing.T) { + t.Run("deletes", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + d.Delete("foo") + + g.Expect(d.index).ToNot(HaveKey("foo")) + }) + + t.Run("resets digests", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{ + "foo": "bar", + }, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + + d.Delete("nop") + g.Expect(d.digests).To(HaveLen(1)) + + d.Delete("foo") + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("deletes non-existent key without error", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Delete("non-existent") + g.Expect(d.index).To(BeEmpty()) + g.Expect(d.digests).To(BeEmpty()) + }) +} + +func TestDigester_Get(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.Get("foo")).To(Equal("bar")) + g.Expect(d.Get("bar")).To(BeEmpty()) +} + +func 
TestDigester_Has(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.Has("foo")).To(BeTrue()) + g.Expect(d.Has("bar")).To(BeFalse()) +} + +func TestDigester_Index(t *testing.T) { + t.Run("returns a copy of the index", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{ + "foo": "bar", + "bar": "baz", + } + d := NewDigester(WithIndex(i)) + + iCopy := d.Index() + g.Expect(iCopy).To(Equal(i)) + g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + }) + + t.Run("returns an empty copy for an empty index", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + emptyIndex := d.Index() + g.Expect(emptyIndex).To(BeEmpty()) + }) +} + +func TestDigester_Len(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + g.Expect(d.Len()).To(Equal(2)) + + g.Expect(NewDigester().Len()).To(Equal(0)) +} + +func TestDigester_String(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + g.Expect(d.String()).To(Equal(`bar baz +foo bar +`)) + + g.Expect(NewDigester().String()).To(Equal("")) +} + +func TestDigester_WriteTo(t *testing.T) { + t.Run("writes", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + expect := `bar baz +foo bar +` + + var buf bytes.Buffer + n, err := d.WriteTo(&buf) + + g.Expect(n).To(Equal(int64(len(expect)))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(buf.String()).To(Equal(expect)) + }) + + t.Run("errors", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + w := &fakeWriter{ + err: errors.New("write error"), + written: 5, + } + n, err := d.WriteTo(w) + + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, w.err)).To(BeTrue()) + g.Expect(n).To(Equal(int64(w.written))) + }) +} + +func TestDigester_Digest(t 
*testing.T) { + t.Run("returns digest", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + expect := digest.SHA256.FromString(d.String()) + + g.Expect(d.Digest(digest.SHA256)).To(Equal(expect)) + g.Expect(d.digests).To(HaveKeyWithValue(digest.SHA256, expect)) + }) + + t.Run("returns cached digest", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{ + "foo": "bar", + "bar": "baz", + }, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + + g.Expect(d.Digest(digest.SHA256)).To(Equal(d.digests[digest.SHA256])) + }) +} + +func TestDigester_Verify(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + })) + + g.Expect(d.Verify(d.Digest(digest.SHA256))).To(BeTrue()) + g.Expect(d.Verify(digest.SHA256.FromString("different"))).To(BeFalse()) +} + +func TestDigester_sortedKeys(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "c/d/e": "bar", + "a/b/c": "baz", + "f/g/h": "foo", + })) + + g.Expect(d.sortedKeys()).To(Equal([]string{ + "a/b/c", + "c/d/e", + "f/g/h", + })) +} + +func TestDigester_reset(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.digests = map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + } + + d.reset() + g.Expect(d.digests).To(BeEmpty()) +} + +func Test_writeLine(t *testing.T) { + t.Run("writes", func(t *testing.T) { + g := NewWithT(t) + + var buf bytes.Buffer + n, err := writeLine(&buf, "foo", "bar") + + g.Expect(n).To(Equal(8)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(buf.String()).To(Equal(`foo bar +`)) + }) + + t.Run("errors", func(t *testing.T) { + g := NewWithT(t) + + w := &fakeWriter{ + err: errors.New("write error"), + written: 5, + } + n, err := writeLine(w, "foo", "bar") + + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, w.err)).To(BeTrue()) + 
g.Expect(n).To(Equal(w.written)) + }) +} + +type fakeWriter struct { + written int + err error +} + +func (f *fakeWriter) Write(p []byte) (n int, err error) { + return f.written, f.err +} diff --git a/internal/mock/gcs/server.go b/internal/mock/gcs/server.go index 63b60b155..d589a3cbc 100644 --- a/internal/mock/gcs/server.go +++ b/internal/mock/gcs/server.go @@ -32,7 +32,7 @@ import ( ) var ( - ObjectNotFound = errors.New("object not found") + ErrObjectNotFound = errors.New("object not found") ) // Object is a mock Server object. @@ -101,7 +101,7 @@ func (s *Server) getObjectFile(key string, generation int64) ([]byte, error) { } } } - return nil, ObjectNotFound + return nil, ErrObjectNotFound } func (s *Server) handler(w http.ResponseWriter, r *http.Request) { diff --git a/internal/object/object.go b/internal/object/object.go index c4bd32c22..37f8ef9fe 100644 --- a/internal/object/object.go +++ b/internal/object/object.go @@ -17,11 +17,14 @@ limitations under the License. package object import ( + "encoding/json" "errors" "time" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" ) var ( @@ -112,3 +115,59 @@ func GetRequeueInterval(obj runtime.Object) (time.Duration, error) { } return time.ParseDuration(interval) } + +// GetSuspend returns the spec.suspend of a given runtime object. +func GetSuspend(obj runtime.Object) (bool, error) { + u, err := toUnstructured(obj) + if err != nil { + return false, err + } + suspend, found, err := unstructured.NestedBool(u.Object, "spec", "suspend") + if err != nil { + return false, err + } + // Since suspend is an optional field, it's false when not found. + if !found { + return false, nil + } + return suspend, nil +} + +// SetSuspend sets the spec.suspend value of a given runtime object. 
+func SetSuspend(obj runtime.Object, val bool) error { + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + if err := unstructured.SetNestedField(u.Object, val, "spec", "suspend"); err != nil { + return err + } + return runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) +} + +// GetArtifact returns the status.artifact of a given runtime object. +func GetArtifact(obj runtime.Object) (*meta.Artifact, error) { + u, err := toUnstructured(obj) + if err != nil { + return nil, err + } + artifact, found, err := unstructured.NestedFieldNoCopy(u.Object, "status", "artifact") + if err != nil { + return nil, err + } + // Since artifact is an optional field, return nil when not found. + if !found { + return nil, nil + } + enc, err := json.Marshal(artifact) + if err != nil { + return nil, err + } + outArtifact := &meta.Artifact{} + if err := json.Unmarshal(enc, outArtifact); err != nil { + return nil, err + } + return outArtifact, nil +} diff --git a/internal/object/object_test.go b/internal/object/object_test.go index 9f0d80bbb..35cab3303 100644 --- a/internal/object/object_test.go +++ b/internal/object/object_test.go @@ -24,7 +24,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/pkg/apis/meta" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) func TestGetStatusLastHandledReconcileAt(t *testing.T) { @@ -86,3 +88,51 @@ func TestGetRequeueInterval(t *testing.T) { _, err = GetRequeueInterval(obj2) g.Expect(err).To(Equal(ErrRequeueIntervalNotFound)) } + +func TestGetSuspend(t *testing.T) { + g := NewWithT(t) + + // Get unset suspend value. 
+ obj := &sourcev1.GitRepository{} + suspend, err := GetSuspend(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(suspend).To(BeFalse()) + + // Get set suspend value. + obj.Spec.Suspend = true + suspend, err = GetSuspend(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(suspend).To(BeTrue()) +} + +func TestSetSuspend(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + err := SetSuspend(obj, true) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Spec.Suspend).To(BeTrue()) + + // Overwrite previous value. + err = SetSuspend(obj, false) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Spec.Suspend).To(BeFalse()) +} + +func TestGetArtifact(t *testing.T) { + g := NewWithT(t) + + // Get unset artifact value. + obj := &sourcev1.GitRepository{} + artifact, err := GetArtifact(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact).To(BeNil()) + + // Get set artifact value. + obj.Status.Artifact = &meta.Artifact{Path: "aaa", Revision: "zzz"} + artifact, err = GetArtifact(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact).ToNot(BeNil()) + g.Expect(artifact.Path).To(Equal("aaa")) + g.Expect(artifact.Revision).To(Equal("zzz")) +} diff --git a/internal/oci/auth.go b/internal/oci/auth.go new file mode 100644 index 000000000..6bd35c59e --- /dev/null +++ b/internal/oci/auth.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package oci + +import ( + "context" + "strings" + + "github.com/google/go-containerregistry/pkg/authn" + + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +// Anonymous is an authn.AuthConfig that always returns an anonymous +// authenticator. It is useful for registries that do not require authentication +// or when the credentials are not known. +// It implements authn.Keychain `Resolve` method and can be used as a keychain. +type Anonymous authn.AuthConfig + +// Resolve implements authn.Keychain. +func (a Anonymous) Resolve(_ authn.Resource) (authn.Authenticator, error) { + return authn.Anonymous, nil +} + +// OIDCAuth generates the OIDC credential authenticator based on the specified cloud provider. +func OIDCAuth(ctx context.Context, url, provider string, opts ...auth.Option) (authn.Authenticator, error) { + u := strings.TrimPrefix(url, sourcev1.OCIRepositoryPrefix) + return authutils.GetArtifactRegistryCredentials(ctx, provider, u, opts...) +} diff --git a/internal/oci/cosign/cosign.go b/internal/oci/cosign/cosign.go new file mode 100644 index 000000000..75af33091 --- /dev/null +++ b/internal/oci/cosign/cosign.go @@ -0,0 +1,162 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cosign + +import ( + "context" + "crypto" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" + "github.com/sigstore/cosign/v2/pkg/cosign" + ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + + soci "github.com/fluxcd/source-controller/internal/oci" +) + +// options is a struct that holds options for verifier. +type options struct { + publicKey []byte + rOpt []remote.Option + identities []cosign.Identity +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithPublicKey sets the public key. +func WithPublicKey(publicKey []byte) Options { + return func(opts *options) { + opts.publicKey = publicKey + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier. +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithIdentities specifies the identity matchers that have to be met +// for the signature to be deemed valid. +func WithIdentities(identities []cosign.Identity) Options { + return func(opts *options) { + opts.identities = identities + } +} + +// CosignVerifier is a struct which is responsible for executing verification logic. +type CosignVerifier struct { + opts *cosign.CheckOpts +} + +// NewCosignVerifier initializes a new CosignVerifier. 
+func NewCosignVerifier(ctx context.Context, opts ...Options) (*CosignVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + checkOpts := &cosign.CheckOpts{} + + ro := coptions.RegistryOptions{} + co, err := ro.ClientOpts(ctx) + if err != nil { + return nil, err + } + + checkOpts.Identities = o.identities + if o.rOpt != nil { + co = append(co, ociremote.WithRemoteOptions(o.rOpt...)) + } + + checkOpts.RegistryClientOpts = co + + // If a public key is provided, it will use it to verify the signature. + // If there is no public key provided, it will try keyless verification. + // https://github.com/sigstore/cosign/blob/main/KEYLESS.md. + if len(o.publicKey) > 0 { + checkOpts.Offline = true + // TODO(hidde): this is an oversight in our implementation. As it is + // theoretically possible to have a custom PK, without disabling tlog. + checkOpts.IgnoreTlog = true + + pubKeyRaw, err := cryptoutils.UnmarshalPEMToPublicKey(o.publicKey) + if err != nil { + return nil, err + } + + checkOpts.SigVerifier, err = signature.LoadVerifier(pubKeyRaw, crypto.SHA256) + if err != nil { + return nil, err + } + } else { + checkOpts.RekorClient, err = rekor.NewClient(coptions.DefaultRekorURL) + if err != nil { + return nil, fmt.Errorf("unable to create Rekor client: %w", err) + } + + // This performs an online fetch of the Rekor public keys, but this is needed + // for verifying tlog entries (both online and offline). + // TODO(hidde): above note is important to keep in mind when we implement + // "offline" tlog above. 
+ if checkOpts.RekorPubKeys, err = cosign.GetRekorPubs(ctx); err != nil { + return nil, fmt.Errorf("unable to get Rekor public keys: %w", err) + } + + checkOpts.CTLogPubKeys, err = cosign.GetCTLogPubs(ctx) + if err != nil { + return nil, fmt.Errorf("unable to get CTLog public keys: %w", err) + } + + if checkOpts.RootCerts, err = fulcio.GetRoots(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio root certs: %w", err) + } + + if checkOpts.IntermediateCerts, err = fulcio.GetIntermediates(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio intermediate certs: %w", err) + } + } + + return &CosignVerifier{ + opts: checkOpts, + }, nil +} + +// Verify verifies the authenticity of the given ref OCI image. +// It returns a boolean indicating if the verification was successful. +// It returns an error if the verification fails, nil otherwise. +func (v *CosignVerifier) Verify(ctx context.Context, ref name.Reference) (soci.VerificationResult, error) { + signatures, _, err := cosign.VerifyImageSignatures(ctx, ref, v.opts) + if err != nil { + return soci.VerificationResultFailed, err + } + + if len(signatures) == 0 { + return soci.VerificationResultFailed, nil + } + + return soci.VerificationResultSuccess, nil +} diff --git a/internal/oci/cosign/cosign_test.go b/internal/oci/cosign/cosign_test.go new file mode 100644 index 000000000..f99e7d1f6 --- /dev/null +++ b/internal/oci/cosign/cosign_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cosign + +import ( + "context" + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + . "github.com/onsi/gomega" + "github.com/sigstore/cosign/v2/pkg/cosign" + + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" +) + +func TestOptions(t *testing.T) { + tests := []struct { + name string + opts []Options + want *options + }{{ + name: "no options", + want: &options{}, + }, { + name: "signature option", + opts: []Options{WithPublicKey([]byte("foo"))}, + want: &options{ + publicKey: []byte("foo"), + rOpt: nil, + }, + }, { + name: "keychain option", + opts: []Options{WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain))}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + }, + }, { + name: "keychain and authenticator option", + opts: []Options{WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + )}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + }, + }, { + name: "keychain, authenticator and transport option", + opts: []Options{WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + )}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{ + 
remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + }, + }, + }, { + name: "identities option", + opts: []Options{WithIdentities([]cosign.Identity{ + { + SubjectRegExp: "test-user", + IssuerRegExp: "^https://token.actions.githubusercontent.com$", + }, + { + SubjectRegExp: "dev-user", + IssuerRegExp: "^https://accounts.google.com$", + }, + })}, + want: &options{ + identities: []cosign.Identity{ + { + SubjectRegExp: "test-user", + IssuerRegExp: "^https://token.actions.githubusercontent.com$", + }, + { + SubjectRegExp: "dev-user", + IssuerRegExp: "^https://accounts.google.com$", + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + o := options{} + for _, opt := range test.opts { + opt(&o) + } + if !reflect.DeepEqual(o.publicKey, test.want.publicKey) { + t.Errorf("got %#v, want %#v", &o.publicKey, test.want.publicKey) + } + + if test.want.rOpt != nil { + if len(o.rOpt) != len(test.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(test.want.rOpt)) + } + return + } + + if test.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) + } + } + }) + } +} + +func TestPrivateKeyVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := testregistry.New(t) + + tagURL := fmt.Sprintf("%s/fluxcd/source-controller:v1.3.0", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + keys, err := cosign.GenerateKeyPair(func(b bool) ([]byte, error) { + return []byte("cosign-password"), nil + }) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "image tag not found", + }, + { + name: "with 
incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithRemoteOptions(remote.WithTransport(transport))) + opts = append(opts, WithPublicKey(keys.PublicBytes)) + + verifier, err := NewCosignVerifier(ctx, opts...) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} diff --git a/internal/oci/notation/notation.go b/internal/oci/notation/notation.go new file mode 100644 index 000000000..0158ffd03 --- /dev/null +++ b/internal/oci/notation/notation.go @@ -0,0 +1,404 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package notation + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + _ "github.com/notaryproject/notation-core-go/signature/cose" + _ "github.com/notaryproject/notation-core-go/signature/jws" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + verifier "github.com/notaryproject/notation-go/verifier" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/notaryproject/notation-go/verifier/truststore" + oras "oras.land/oras-go/v2/registry/remote" + oauth "oras.land/oras-go/v2/registry/remote/auth" + retryhttp "oras.land/oras-go/v2/registry/remote/retry" + + "github.com/fluxcd/source-controller/internal/helm/common" + "github.com/fluxcd/source-controller/internal/oci" +) + +// name of the trustpolicy file defined in the Secret containing +// notation public keys. +const DefaultTrustPolicyKey = "trustpolicy.json" + +// options is a struct that holds options for verifier. +type options struct { + rootCertificates [][]byte + rOpt []remote.Option + trustPolicy *trustpolicy.Document + auth authn.Authenticator + keychain authn.Keychain + insecure bool + logger logr.Logger + transport *http.Transport +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithInsecureRegistry sets notation to verify against insecure registry. +func WithInsecureRegistry(insecure bool) Options { + return func(opts *options) { + opts.insecure = insecure + } +} + +// WithTrustPolicy sets the trust policy configuration. 
+func WithTrustPolicy(trustPolicy *trustpolicy.Document) Options { + return func(opts *options) { + opts.trustPolicy = trustPolicy + } +} + +// WithRootCertificates is a functional option for overriding the default +// rootCertificate options used by the verifier to set the root CA certificate for notary. +// It takes in a list of certificate data as an array of byte slices. +// The function returns a options function option that sets the public certificate +// in the notation options. +func WithRootCertificates(data [][]byte) Options { + return func(opts *options) { + opts.rootCertificates = data + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithAuth is a functional option for overriding the default +// authenticator options used by the verifier +func WithAuth(auth authn.Authenticator) Options { + return func(o *options) { + o.auth = auth + } +} + +// WithKeychain is a functional option for overriding the default +// keychain options used by the verifier +func WithKeychain(key authn.Keychain) Options { + return func(o *options) { + o.keychain = key + } +} + +// WithLogger is a function that returns an Options function to set the logger for the options. +// The logger is used for logging purposes within the options. +func WithLogger(logger logr.Logger) Options { + return func(o *options) { + o.logger = logger + } +} + +// WithTransport is a function that returns an Options function to set the transport for the options. 
+func WithTransport(transport *http.Transport) Options { + return func(o *options) { + o.transport = transport + } +} + +// NotationVerifier is a struct which is responsible for executing verification logic +type NotationVerifier struct { + auth authn.Authenticator + keychain authn.Keychain + verifier *notation.Verifier + opts []remote.Option + insecure bool + logger logr.Logger + transport *http.Transport +} + +var _ truststore.X509TrustStore = &trustStore{} + +// trustStore is used by notation-go/verifier to retrieve the root certificate for notary. +// The default behaviour is to read the certificate from disk and return it as a byte slice. +// The reason for implementing the interface here is to avoid reading the certificate from disk +// as the certificate is already available in memory. +type trustStore struct { + certs [][]byte +} + +// GetCertificates implements truststore.X509TrustStore. +func (s trustStore) GetCertificates(ctx context.Context, storeType truststore.Type, namedStore string) ([]*x509.Certificate, error) { + certs := []*x509.Certificate{} + for _, data := range s.certs { + raw := data + block, _ := pem.Decode(raw) + if block != nil { + raw = block.Bytes + } + + cert, err := x509.ParseCertificates(raw) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate '%s': %s", namedStore, err) + } + + certs = append(certs, cert...) 
+ } + + return certs, nil +} + +// NewNotationVerifier initializes a new Verifier +func NewNotationVerifier(opts ...Options) (*NotationVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + store := &trustStore{ + certs: o.rootCertificates, + } + + trustpolicy := o.trustPolicy + if trustpolicy == nil { + return nil, fmt.Errorf("trust policy cannot be empty") + } + + verifier, err := verifier.New(trustpolicy, store, nil) + if err != nil { + return nil, err + } + + return &NotationVerifier{ + auth: o.auth, + keychain: o.keychain, + verifier: &verifier, + opts: o.rOpt, + insecure: o.insecure, + logger: o.logger, + transport: o.transport, + }, nil +} + +// CleanTrustPolicy cleans the given trust policy by removing trust stores and trusted identities +// for trust policy statements that are set to skip signature verification but still have configured trust stores and/or trusted identities. +// It takes a pointer to a trustpolicy.Document and a logger from the logr package as input parameters. +// If the trustPolicy is nil, it returns nil. +// Otherwise, it iterates over the trustPolicy.TrustPolicies and checks if each trust policy statement's +// SignatureVerification.VerificationLevel is set to trustpolicy.LevelSkip.Name. +// If it is, it logs a warning message and removes the trust stores and trusted identities for that trust policy statement. +// Finally, it returns the modified trustPolicy. +func CleanTrustPolicy(trustPolicy *trustpolicy.Document, logger logr.Logger) *trustpolicy.Document { + if trustPolicy == nil { + return nil + } + + for i, j := range trustPolicy.TrustPolicies { + if j.SignatureVerification.VerificationLevel == trustpolicy.LevelSkip.Name { + if len(j.TrustStores) > 0 || len(j.TrustedIdentities) > 0 { + logger.Info(fmt.Sprintf("warning: trust policy statement '%s' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", j.Name)) + } + trustPolicy.TrustPolicies[i].TrustStores = []string{} + trustPolicy.TrustPolicies[i].TrustedIdentities = []string{} + } + } + + return trustPolicy +} + +// Verify verifies the authenticity of the given ref OCI image. +// It returns a boolean indicating if the verification was successful. +// It returns an error if the verification fails, nil otherwise. +func (v *NotationVerifier) Verify(ctx context.Context, ref name.Reference) (oci.VerificationResult, error) { + url := ref.Name() + + remoteRepo, err := v.remoteRepo(url) + if err != nil { + return oci.VerificationResultFailed, err + } + + repo := registry.NewRepository(remoteRepo) + + repoUrl, err := v.repoUrlWithDigest(url, ref) + if err != nil { + return oci.VerificationResultFailed, err + } + + verifyOptions := notation.VerifyOptions{ + ArtifactReference: repoUrl, + MaxSignatureAttempts: 3, + } + + _, outcomes, err := notation.Verify(ctx, *v.verifier, repo, verifyOptions) + if err != nil { + return oci.VerificationResultFailed, err + } + + return v.checkOutcome(outcomes, url) +} + +// checkOutcome checks the verification outcomes for a given URL and returns the corresponding OCI verification result. +// It takes a slice of verification outcomes and a URL as input parameters. +// If there are no verification outcomes, it returns a failed verification result with an error message. +// If the first verification outcome has a verification level of "trustpolicy.LevelSkip", it returns an ignored verification result. +// This function assumes that "trustpolicy.TypeIntegrity" is always enforced. It will return a successful validation result if "trustpolicy.TypeAuthenticity" is successful too. +// If any of the verification results have an error, it logs the error message and sets the "ignore" flag to true if the error type is "trustpolicy.TypeAuthenticity". +// If the "ignore" flag is true, it returns an ignored verification result. 
+// Otherwise, it returns a successful verification result. +// The function returns the OCI verification result and an error, if any. +func (v *NotationVerifier) checkOutcome(outcomes []*notation.VerificationOutcome, url string) (oci.VerificationResult, error) { + if len(outcomes) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("signature verification failed for all the signatures associated with %s", url) + } + + // should only ever be one item in the outcomes slice + outcome := outcomes[0] + + // if the verification level is set to skip, we ignore the verification result + // as there should be no verification results in outcome and we do not want + // to mark the result as verified + if outcome.VerificationLevel == trustpolicy.LevelSkip { + return oci.VerificationResultIgnored, nil + } + + ignore := false + + // loop through verification results to check for errors + for _, i := range outcome.VerificationResults { + // error if action is not marked as `skip` and there is an error + if i.Error != nil { + // flag to ignore the verification result if the error is related to type `authenticity` + if i.Type == trustpolicy.TypeAuthenticity { + ignore = true + } + // log results of error + v.logger.Info(fmt.Sprintf("verification check for type '%s' failed for '%s' with message: '%s'", i.Type, url, i.Error.Error())) + } + } + + // if the ignore flag is set, we ignore the verification result so not to mark as verified + if ignore { + return oci.VerificationResultIgnored, nil + } + + // result is okay to mark as verified + return oci.VerificationResultSuccess, nil +} + +// remoteRepo is a function that creates a remote repository object for the given repository URL. +// It initializes the repository with the provided URL and sets the PlainHTTP flag based on the value of the 'insecure' field in the Verifier struct. +// It also sets up the credential provider based on the authentication configuration provided in the Verifier struct. 
+// If authentication is required, it retrieves the authentication credentials and sets up the repository client with the appropriate headers and credentials. +// Finally, it returns the remote repository object and any error encountered during the process. +func (v *NotationVerifier) remoteRepo(repoUrl string) (*oras.Repository, error) { + remoteRepo, err := oras.NewRepository(repoUrl) + if err != nil { + return &oras.Repository{}, err + } + + remoteRepo.PlainHTTP = v.insecure + + credentialProvider := func(ctx context.Context, registry string) (oauth.Credential, error) { + return oauth.EmptyCredential, nil + } + + auth := authn.Anonymous + + if v.auth != nil { + auth = v.auth + } else if v.keychain != nil { + source := common.StringResource{Registry: repoUrl} + + auth, err = v.keychain.Resolve(source) + if err != nil { + return &oras.Repository{}, err + } + } + + if auth != authn.Anonymous { + authConfig, err := auth.Authorization() + if err != nil { + return &oras.Repository{}, err + } + + credentialProvider = func(ctx context.Context, registry string) (oauth.Credential, error) { + if authConfig.Username != "" || authConfig.Password != "" || authConfig.IdentityToken != "" || authConfig.RegistryToken != "" { + return oauth.Credential{ + Username: authConfig.Username, + Password: authConfig.Password, + RefreshToken: authConfig.IdentityToken, + AccessToken: authConfig.RegistryToken, + }, nil + } + return oauth.EmptyCredential, nil + } + } + + hc := retryhttp.DefaultClient + if v.transport != nil { + hc = &http.Client{ + Transport: retryhttp.NewTransport(v.transport), + } + } + repoClient := &oauth.Client{ + Client: hc, + Header: http.Header{ + "User-Agent": {"flux"}, + }, + Credential: credentialProvider, + } + + remoteRepo.Client = repoClient + + return remoteRepo, nil +} + +// repoUrlWithDigest takes a repository URL and a reference and returns the repository URL with the digest appended to it. 
+// If the repository URL does not contain a tag or digest, it returns an error. +func (v *NotationVerifier) repoUrlWithDigest(repoUrl string, ref name.Reference) (string, error) { + if !strings.Contains(repoUrl, "@") { + image, err := remote.Image(ref, v.opts...) + if err != nil { + return "", err + } + + digest, err := image.Digest() + if err != nil { + return "", err + } + + lastIndex := strings.LastIndex(repoUrl, ":") + if lastIndex == -1 { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + + firstPart := repoUrl[:lastIndex] + + if s := strings.Split(repoUrl, ":"); len(s) >= 2 { + repoUrl = fmt.Sprintf("%s@%s", firstPart, digest) + } else { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + } + return repoUrl, nil +} diff --git a/internal/oci/notation/notation_test.go b/internal/oci/notation/notation_test.go new file mode 100644 index 000000000..cdd8a3872 --- /dev/null +++ b/internal/oci/notation/notation_test.go @@ -0,0 +1,651 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package notation + +import ( + "context" + "fmt" + "net/http" + "net/url" + "path" + "reflect" + "testing" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + + "github.com/fluxcd/source-controller/internal/oci" + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" +) + +func TestOptions(t *testing.T) { + testCases := []struct { + name string + opts []Options + want *options + }{ + { + name: "no options", + want: &options{}, + }, + { + name: "signature option", + opts: []Options{WithRootCertificates([][]byte{[]byte("foo")})}, + want: &options{ + rootCertificates: [][]byte{[]byte("foo")}, + rOpt: nil, + }, + }, + { + name: "keychain option", + opts: []Options{ + WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain)), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain and authenticator option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + auth: &authn.Basic{Username: "foo", 
Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain, authenticator and transport option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + }, + auth: &authn.Basic{Username: "foo", Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "truststore, empty document", + opts: []Options{WithTrustPolicy(&trustpolicy.Document{})}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: &trustpolicy.Document{}, + }, + }, + { + name: "truststore, dummy document", + opts: []Options{WithTrustPolicy(dummyPolicyDocument())}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: dummyPolicyDocument(), + }, + }, + { + name: "insecure, false", + opts: []Options{WithInsecureRegistry(false)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + { + name: "insecure, true", + opts: []Options{WithInsecureRegistry(true)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: true, + }, + }, + { + name: "insecure, default", + opts: []Options{}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + o := options{} + for _, opt := range tc.opts { + opt(&o) + } + if !reflect.DeepEqual(o.rootCertificates, tc.want.rootCertificates) { + t.Errorf("got %#v, want %#v", 
&o.rootCertificates, tc.want.rootCertificates) + } + + if !reflect.DeepEqual(o.trustPolicy, tc.want.trustPolicy) { + t.Errorf("got %#v, want %#v", &o.trustPolicy, tc.want.trustPolicy) + } + + if tc.want.rOpt != nil { + if len(o.rOpt) != len(tc.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(tc.want.rOpt)) + } + return + } + + if tc.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) + } + } + }) + } +} + +func TestCleanTrustPolicy(t *testing.T) { + testCases := []struct { + name string + policy []trustpolicy.TrustPolicy + want *trustpolicy.Document + wantLogMessage string + }{ + { + name: "no trust policy", + want: nil, + }, + { + name: "trust policy verification level set to strict and should not be cleaned", + policy: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + }, + }, + { + name: "trust policy with multiple policies and should not be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + 
TrustedIdentities: nil, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }, + }, + }, + }, + { + name: "trust policy verification level skip should be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + { + name: "trust policy with multiple policies and mixture of verification levels including skip", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name-2' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + var policy *trustpolicy.Document + + if tc.policy != nil { + policy = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: tc.policy, + } + } + + cleanedPolicy := CleanTrustPolicy(policy, logger) + + if !reflect.DeepEqual(cleanedPolicy, tc.want) { + t.Errorf("got %#v, want %#v", cleanedPolicy, tc.want) + } + + if tc.wantLogMessage != "" { + g.Expect(len(l.Output)).Should(Equal(1)) + g.Expect(l.Output[0]).Should(Equal(tc.wantLogMessage)) + } + }) + } +} + +func TestOutcomeChecker(t *testing.T) { + testCases := []struct { + name string + outcome []*notation.VerificationOutcome + wantErrMessage string + wantLogMessage []string + wantVerificationResult oci.VerificationResult + }{ + { + name: "no outcome failed with error message", + wantVerificationResult: oci.VerificationResultFailed, + wantErrMessage: "signature verification failed for all the signatures associated with example.com/podInfo", + }, + { + name: "verification result ignored with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticity, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("123"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + wantLogMessage: []string{"verification check for type 'authenticity' failed for 'example.com/podInfo' with message: '123'"}, + }, + { + name: "verification result ignored with no log message (skip)", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelSkip, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + }, + { + name: 
"verification result success with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticTimestamp, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("456"), + }, + { + Type: trustpolicy.TypeExpiry, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("789"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + wantLogMessage: []string{ + "verification check for type 'authenticTimestamp' failed for 'example.com/podInfo' with message: '456'", + "verification check for type 'expiry' failed for 'example.com/podInfo' with message: '789'", + }, + }, + { + name: "verification result success with no log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + result, err := v.checkOutcome(tc.outcome, "example.com/podInfo") + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + } + + g.Expect(result).Should(Equal(tc.wantVerificationResult)) + g.Expect(len(l.Output)).Should(Equal(len(tc.wantLogMessage))) + + for i, j := range tc.wantLogMessage { + g.Expect(l.Output[i]).Should(Equal(j)) + } + }) + } +} + +func TestRepoUrlWithDigest(t *testing.T) { + testCases := []struct { + name string + repoUrl string + digest string + tag string + wantResultUrl string + wantErrMessage string + passUrlWithoutTag bool + }{ + { + name: "valid repo url with digest", + repoUrl: 
"ghcr.io/stefanprodan/charts/podinfo", + digest: "sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url with tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url without tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "url ghcr.io/stefanprodan/charts/podinfo does not contain tag or digest", + passUrlWithoutTag: true, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + var url string + repo, _ := name.NewRepository(tc.repoUrl) + var ref name.Reference + if tc.digest != "" { + ref = repo.Digest(tc.digest) + url = fmt.Sprintf("%s@%s", tc.repoUrl, tc.digest) + } else if tc.tag != "" { + ref = repo.Tag(tc.tag) + if !tc.passUrlWithoutTag { + url = fmt.Sprintf("%s:%s", tc.repoUrl, tc.tag) + } else { + url = tc.repoUrl + } + } else { + ref = repo.Tag(name.DefaultTag) + url = fmt.Sprintf("%s:%s", tc.repoUrl, name.DefaultTag) + } + + result, err := v.repoUrlWithDigest(url, ref) + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + g.Expect(result).Should(Equal(tc.wantResultUrl)) + } + }) + } +} + +func TestVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := 
testregistry.New(t) + + tarFilePath := path.Join("..", "..", "controller", "testdata", "podinfo", "podinfo-6.1.5.tar") + _, err := testregistry.CreatePodinfoImageFromTar(tarFilePath, "6.1.5", registryAddr) + g.Expect(err).NotTo(HaveOccurred()) + + tagURL := fmt.Sprintf("%s/podinfo:6.1.5", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "no signature is associated with", + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithTransport(transport)) + opts = append(opts, WithTrustPolicy(dummyPolicyDocument())) + opts = append(opts, WithInsecureRegistry(true)) + + verifier, err := NewNotationVerifier(opts...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} + +func dummyPolicyDocument() (policyDoc *trustpolicy.Document) { + policyDoc = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{dummyPolicyStatement()}, + } + return +} + +func dummyPolicyStatement() (policyStatement trustpolicy.TrustPolicy) { + policyStatement = trustpolicy.TrustPolicy{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"ca:valid-trust-store", "signingAuthority:valid-trust-store"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + } + return +} + +// mocking LogSink to capture log messages. Source: https://stackoverflow.com/a/71425740 +type testLogger struct { + Output []string + r logr.RuntimeInfo +} + +func (t *testLogger) doLog(msg string) { + t.Output = append(t.Output, msg) +} + +func (t *testLogger) Init(info logr.RuntimeInfo) { + t.r = info +} + +func (t *testLogger) Enabled(level int) bool { + return true +} + +func (t *testLogger) Info(level int, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) Error(err error, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { + return t +} + +func (t *testLogger) WithName(name string) logr.LogSink { + return t +} diff --git a/internal/oci/verifier.go b/internal/oci/verifier.go new file mode 100644 index 000000000..eeb301eb0 --- /dev/null +++ b/internal/oci/verifier.go @@ -0,0 +1,42 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oci + +import ( + "context" + + "github.com/google/go-containerregistry/pkg/name" +) + +// VerificationResult represents the result of a verification process. +type VerificationResult string + +const ( + // VerificationResultSuccess indicates that the artifact has been verified. + VerificationResultSuccess VerificationResult = "verified" + // VerificationResultFailed indicates that the artifact could not be verified. + VerificationResultFailed VerificationResult = "unverified" + // VerificationResultIgnored indicates that the artifact has not been verified + // but is allowed to proceed. This is used primarily when notation is used + // as the verifier. + VerificationResultIgnored VerificationResult = "ignored" +) + +// Verifier is an interface for verifying the authenticity of an OCI image. 
+type Verifier interface { + Verify(ctx context.Context, ref name.Reference) (VerificationResult, error) +} diff --git a/internal/predicates/helmrepository_type_predicate.go b/internal/predicates/helmrepository_type_predicate.go index 76694b82f..714d77942 100644 --- a/internal/predicates/helmrepository_type_predicate.go +++ b/internal/predicates/helmrepository_type_predicate.go @@ -18,69 +18,69 @@ package predicates import ( "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) -// helmRepositoryTypeFilter filters events for a given HelmRepository type. -// It returns true if the event is for a HelmRepository of the given type. -func helmRepositoryTypeFilter(repositoryType string, o client.Object) bool { - if o == nil { - return false - } - - // return true if the object is a HelmRepository - // and the type is the same as the one we are looking for. - hr, ok := o.(*sourcev1.HelmRepository) - if !ok { - return false - } +// HelmRepositoryOCIMigrationPredicate implements predicate functions to allow +// events for HelmRepository OCI that need migration to static object. Non-OCI +// HelmRepositories are always allowed. +type HelmRepositoryOCIMigrationPredicate struct { + predicate.Funcs +} - return hr.Spec.Type == repositoryType +// Create allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Create(e event.CreateEvent) bool { + return HelmRepositoryOCIRequireMigration(e.Object) } -// HelmRepositoryTypePredicate is a predicate that filters events for a given HelmRepository type. 
-type HelmRepositoryTypePredicate struct { - RepositoryType string - predicate.Funcs +// Update allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Update(e event.UpdateEvent) bool { + return HelmRepositoryOCIRequireMigration(e.ObjectNew) } -// Create returns true if the Create event is for a HelmRepository of the given type. -func (h HelmRepositoryTypePredicate) Create(e event.CreateEvent) bool { - return helmRepositoryTypeFilter(h.RepositoryType, e.Object) +// Delete allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Delete(e event.DeleteEvent) bool { + return HelmRepositoryOCIRequireMigration(e.Object) } -// Update returns true if the Update event is for a HelmRepository of the given type. -func (h HelmRepositoryTypePredicate) Update(e event.UpdateEvent) bool { - if e.ObjectOld == nil || e.ObjectNew == nil { +// HelmRepositoryOCIRequireMigration returns if a given HelmRepository of type +// OCI requires migration to static object. For non-OCI HelmRepository, it +// returns true. +func HelmRepositoryOCIRequireMigration(o client.Object) bool { + if o == nil { return false } - // check if the old object is a HelmRepository - oldObj, ok := e.ObjectOld.(*sourcev1.HelmRepository) + hr, ok := o.(*sourcev1.HelmRepository) if !ok { return false } - // check if the new object is a HelmRepository - newObj, ok := e.ObjectNew.(*sourcev1.HelmRepository) - if !ok { - return false + if hr.Spec.Type != sourcev1.HelmRepositoryTypeOCI { + // Always allow non-OCI HelmRepository. 
+ return true } - isOfRepositoryType := newObj.Spec.Type == h.RepositoryType - wasOfRepositoryType := oldObj.Spec.Type == h.RepositoryType && !isOfRepositoryType - return isOfRepositoryType || wasOfRepositoryType -} + if controllerutil.ContainsFinalizer(hr, sourcev1.SourceFinalizer) || !hasEmptyHelmRepositoryStatus(hr) { + return true + } -// Delete returns true if the Delete event is for a HelmRepository of the given type. -func (h HelmRepositoryTypePredicate) Delete(e event.DeleteEvent) bool { - return helmRepositoryTypeFilter(h.RepositoryType, e.Object) + return false } -// Generic returns true if the Generic event is for a HelmRepository of the given type. -func (h HelmRepositoryTypePredicate) Generic(e event.GenericEvent) bool { - return helmRepositoryTypeFilter(h.RepositoryType, e.Object) +// hasEmptyHelmRepositoryStatus checks if the status of a HelmRepository is +// empty. +func hasEmptyHelmRepositoryStatus(obj *sourcev1.HelmRepository) bool { + if obj.Status.ObservedGeneration == 0 && + obj.Status.Conditions == nil && + obj.Status.URL == "" && + obj.Status.Artifact == nil && + obj.Status.ReconcileRequestStatus.LastHandledReconcileAt == "" { + return true + } + return false } diff --git a/internal/predicates/helmrepository_type_predicate_test.go b/internal/predicates/helmrepository_type_predicate_test.go index e54726892..e98728413 100644 --- a/internal/predicates/helmrepository_type_predicate_test.go +++ b/internal/predicates/helmrepository_type_predicate_test.go @@ -19,109 +19,240 @@ package predicates import ( "testing" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "sigs.k8s.io/controller-runtime/pkg/client" + . 
"github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/event" -) -func TestHelmRepositoryTypePredicate_Create(t *testing.T) { - obj := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{}} - http := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{Type: "default"}} - oci := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{Type: "oci"}} - not := &unstructured.Unstructured{} + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) +func TestHelmRepositoryOCIMigrationPredicate_Create(t *testing.T) { tests := []struct { - name string - obj client.Object - want bool + name string + beforeFunc func(o *sourcev1.HelmRepository) + want bool }{ - {name: "new", obj: obj, want: false}, - {name: "http", obj: http, want: true}, - {name: "oci", obj: oci, want: false}, - {name: "not a HelmRepository", obj: not, want: false}, - {name: "nil", obj: nil, want: false}, + { + name: "new oci helm repo no status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: false, + }, + { + name: "new oci helm repo with default observed gen status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status.ObservedGeneration = -1 + }, + want: true, + }, + { + name: "old oci helm repo with finalizer only", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Finalizers = []string{sourcev1.SourceFinalizer} + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: true, + }, + { + name: "old oci helm repo with status only", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 3, + } + conditions.MarkTrue(o, meta.ReadyCondition, "foo", "bar") + }, + want: true, + }, + { + name: "old oci helm repo with finalizer and 
status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Finalizers = []string{sourcev1.SourceFinalizer} + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 3, + } + conditions.MarkTrue(o, meta.ReadyCondition, "foo", "bar") + }, + want: true, + }, + { + name: "new default helm repo", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - g := gomega.NewWithT(t) + g := NewWithT(t) - so := HelmRepositoryTypePredicate{RepositoryType: "default"} - e := event.CreateEvent{ - Object: tt.obj, + o := &sourcev1.HelmRepository{} + if tt.beforeFunc != nil { + tt.beforeFunc(o) } - g.Expect(so.Create(e)).To(gomega.Equal(tt.want)) + e := event.CreateEvent{Object: o} + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Create(e)).To(Equal(tt.want)) }) } } -func TestHelmRepositoryTypePredicate_Update(t *testing.T) { - repoA := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{ - Type: sourcev1.HelmRepositoryTypeDefault, - }} +func TestHelmRepositoryOCIMigrationPredicate_Update(t *testing.T) { + tests := []struct { + name string + beforeFunc func(oldObj, newObj *sourcev1.HelmRepository) + want bool + }{ + { + name: "update oci repo", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + URL: "oci://foo/bar", + } + *newObj = *oldObj.DeepCopy() + newObj.Spec.URL = "oci://foo/baz" + }, + want: false, + }, + { + name: "migrate old oci repo with status only", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Generation = 2 + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + oldObj.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 2, + } + conditions.MarkTrue(oldObj, meta.ReadyCondition, "foo", "bar") - repoB 
:= &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{ - Type: sourcev1.HelmRepositoryTypeOCI, - }} + *newObj = *oldObj.DeepCopy() + newObj.Generation = 3 + }, + want: true, + }, + { + name: "migrate old oci repo with finalizer only", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Generation = 2 + oldObj.Finalizers = []string{sourcev1.SourceFinalizer} + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } - empty := &sourcev1.HelmRepository{} - not := &unstructured.Unstructured{} + *newObj = *oldObj.DeepCopy() + newObj.Generation = 3 + }, + want: true, + }, + { + name: "type switch default to oci", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeDefault, + } + oldObj.Status = sourcev1.HelmRepositoryStatus{ + Artifact: &meta.Artifact{}, + URL: "http://some-address", + ObservedGeneration: 3, + } - tests := []struct { - name string - old client.Object - new client.Object - want bool - }{ - {name: "diff type", old: repoA, new: repoB, want: true}, - {name: "new with type", old: empty, new: repoA, want: true}, - {name: "old with type", old: repoA, new: empty, want: true}, - {name: "old not a HelmRepository", old: not, new: repoA, want: false}, - {name: "new not a HelmRepository", old: repoA, new: not, want: false}, - {name: "old nil", old: nil, new: repoA, want: false}, - {name: "new nil", old: repoA, new: nil, want: false}, + *newObj = *oldObj.DeepCopy() + newObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + }, + want: true, + }, + { + name: "type switch oci to default", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + *newObj = *oldObj.DeepCopy() + newObj.Spec.Type = sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, } + for _, tt := range tests { t.Run(tt.name, 
func(t *testing.T) { - g := gomega.NewWithT(t) + g := NewWithT(t) - so := HelmRepositoryTypePredicate{RepositoryType: "default"} + oldObj := &sourcev1.HelmRepository{} + newObj := oldObj.DeepCopy() + if tt.beforeFunc != nil { + tt.beforeFunc(oldObj, newObj) + } e := event.UpdateEvent{ - ObjectOld: tt.old, - ObjectNew: tt.new, + ObjectOld: oldObj, + ObjectNew: newObj, } - g.Expect(so.Update(e)).To(gomega.Equal(tt.want)) + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Update(e)).To(Equal(tt.want)) }) } } -func TestHelmRepositoryTypePredicate_Delete(t *testing.T) { - obj := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{}} - http := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{Type: "default"}} - oci := &sourcev1.HelmRepository{Spec: sourcev1.HelmRepositorySpec{Type: "oci"}} - not := &unstructured.Unstructured{} - +func TestHelmRepositoryOCIMigrationPredicate_Delete(t *testing.T) { tests := []struct { - name string - obj client.Object - want bool + name string + beforeFunc func(obj *sourcev1.HelmRepository) + want bool }{ - {name: "new", obj: obj, want: false}, - {name: "http", obj: http, want: true}, - {name: "oci", obj: oci, want: false}, - {name: "not a HelmRepository", obj: not, want: false}, - {name: "nil", obj: nil, want: false}, + { + name: "oci with finalizer", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Finalizers = []string{sourcev1.SourceFinalizer} + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: true, + }, + { + name: "oci with status", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + obj.Status.ObservedGeneration = 4 + }, + want: true, + }, + { + name: "oci without finalizer or status", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: false, + }, + { + name: "default helm repo", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = 
sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - g := gomega.NewWithT(t) + g := NewWithT(t) - so := HelmRepositoryTypePredicate{RepositoryType: "default"} - e := event.DeleteEvent{ - Object: tt.obj, + obj := &sourcev1.HelmRepository{} + if tt.beforeFunc != nil { + tt.beforeFunc(obj) } - g.Expect(so.Delete(e)).To(gomega.Equal(tt.want)) + e := event.DeleteEvent{Object: obj} + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Delete(e)).To(Equal(tt.want)) }) } } diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go index 5e3b21e4c..27c931168 100644 --- a/internal/reconcile/reconcile.go +++ b/internal/reconcile/reconcile.go @@ -124,11 +124,20 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb conditions.Delete(obj, meta.ReconcilingCondition) } + // Presence of reconciling means that the reconciliation didn't succeed. + // Set the Reconciling reason to ProgressingWithRetry to indicate a failure + // retry. + if conditions.IsReconciling(obj) { + reconciling := conditions.Get(obj, meta.ReconcilingCondition) + reconciling.Reason = meta.ProgressingWithRetryReason + conditions.Set(obj, reconciling) + } + // Analyze the reconcile error. switch t := recErr.(type) { case *serror.Stalling: if res == ResultEmpty { - conditions.MarkStalled(obj, t.Reason, t.Error()) + conditions.MarkStalled(obj, t.Reason, "%s", t.Error()) // The current generation has been reconciled successfully and it // has resulted in a stalled state. Return no error to stop further // requeuing. @@ -174,9 +183,10 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb // LowestRequeuingResult returns the ReconcileResult with the lowest requeue // period. 
// Weightage: -// ResultRequeue - immediate requeue (lowest) -// ResultSuccess - requeue at an interval -// ResultEmpty - no requeue +// +// ResultRequeue - immediate requeue (lowest) +// ResultSuccess - requeue at an interval +// ResultEmpty - no requeue func LowestRequeuingResult(i, j Result) Result { switch { case i == ResultEmpty: diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go index b9b2ccfea..e22f370b5 100644 --- a/internal/reconcile/reconcile_test.go +++ b/internal/reconcile/reconcile_test.go @@ -29,7 +29,7 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" serror "github.com/fluxcd/source-controller/internal/error" ) @@ -206,6 +206,21 @@ func TestComputeReconcileResult(t *testing.T) { t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue()) }, }, + { + name: "failed with Reconciling=True adds ProgressingWithRetry reason", + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "some msg") + }, + result: ResultEmpty, + recErr: fmt.Errorf("some error"), + wantResult: ctrl.Result{}, + wantErr: true, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "some msg"), + }, + }, } for _, tt := range tests { diff --git a/internal/reconcile/summarize/processor.go b/internal/reconcile/summarize/processor.go index b995d2db5..746ca7c8e 100644 --- a/internal/reconcile/summarize/processor.go +++ b/internal/reconcile/summarize/processor.go @@ -24,9 +24,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + eventv1 
"github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/object" "github.com/fluxcd/source-controller/internal/reconcile" @@ -37,27 +36,6 @@ import ( // reconciliation failure. The errors can be recorded as logs and events. type ResultProcessor func(context.Context, kuberecorder.EventRecorder, client.Object, reconcile.Result, error) -// RecordContextualError is a ResultProcessor that records the contextual errors -// based on their types. -// An event is recorded for the errors that are returned to the runtime. The -// runtime handles the logging of the error. -// An event is recorded and an error is logged for errors that are known to be -// swallowed, not returned to the runtime. -func RecordContextualError(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, err error) { - switch e := err.(type) { - case *serror.Event: - recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) - case *serror.Waiting: - // Waiting errors are not returned to the runtime. Log it explicitly. - ctrl.LoggerFrom(ctx).Info("reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) - recorder.Event(obj, corev1.EventTypeNormal, e.Reason, e.Error()) - case *serror.Stalling: - // Stalling errors are not returned to the runtime. Log it explicitly. - ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") - recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) - } -} - // RecordReconcileReq is a ResultProcessor that checks the reconcile // annotation value and sets it in the object status as // status.lastHandledReconcileAt. 
@@ -113,7 +91,7 @@ func recordEvent(recorder kuberecorder.EventRecorder, obj client.Object, eventTy recorder.Eventf(obj, corev1.EventTypeNormal, reason, err.Error()) } else { // K8s native event only. - recorder.Eventf(obj, events.EventTypeTrace, reason, err.Error()) + recorder.Eventf(obj, eventv1.EventTypeTrace, reason, err.Error()) } case corev1.EventTypeWarning: // TODO: Due to the current implementation of the event recorder, all diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go index dc6765d83..44f68b5bf 100644 --- a/internal/reconcile/summarize/processor_test.go +++ b/internal/reconcile/summarize/processor_test.go @@ -26,7 +26,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/fluxcd/pkg/apis/meta" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/object" "github.com/fluxcd/source-controller/internal/reconcile" ) @@ -64,6 +65,43 @@ func TestRecordReconcileReq(t *testing.T) { t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) }, }, + { + name: "empty reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "whitespace-only reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: " ", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt(" ")) + }, + }, + { + name: "reconcile annotation overwrites existing status value", + beforeFunc: func(obj client.Object) { + 
object.SetStatusLastHandledReconcileAt(obj, "old-value") + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "new-value", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("new-value")) + }, + }, } for _, tt := range tests { diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go index d274d03d5..8650a0907 100644 --- a/internal/reconcile/summarize/summary.go +++ b/internal/reconcile/summarize/summary.go @@ -50,15 +50,15 @@ type Conditions struct { // Helper is SummarizeAndPatch helper. type Helper struct { - recorder kuberecorder.EventRecorder - patchHelper *patch.Helper + recorder kuberecorder.EventRecorder + serialPatcher *patch.SerialPatcher } // NewHelper returns an initialized Helper. -func NewHelper(recorder kuberecorder.EventRecorder, patchHelper *patch.Helper) *Helper { +func NewHelper(recorder kuberecorder.EventRecorder, serialPatcher *patch.SerialPatcher) *Helper { return &Helper{ - recorder: recorder, - patchHelper: patchHelper, + recorder: recorder, + serialPatcher: serialPatcher, } } @@ -90,6 +90,9 @@ type HelperOptions struct { // PatchFieldOwner defines the field owner configuration for the Kubernetes // patch operation. PatchFieldOwner string + // BiPolarityConditionTypes is a list of bipolar conditions in the order + // of priority. + BiPolarityConditionTypes []string } // Option is configuration that modifies SummarizeAndPatch. @@ -149,6 +152,14 @@ func WithPatchFieldOwner(fieldOwner string) Option { } } +// WithBiPolarityConditionTypes sets the BiPolarityConditionTypes used to +// calculate the value of Ready condition in SummarizeAndPatch. +func WithBiPolarityConditionTypes(types ...string) Option { + return func(s *HelperOptions) { + s.BiPolarityConditionTypes = types + } +} + // SummarizeAndPatch summarizes and patches the result to the target object. 
// When used at the very end of a reconciliation, the result builder must be // specified using the Option WithResultBuilder(). The returned result and error @@ -206,6 +217,26 @@ func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, o ) } + // Check any BiPolarity conditions in the status that are False. Failing + // BiPolarity condition should be set as the Ready condition value to + // reflect the actual cause of the reconciliation failure. + // NOTE: This is applicable to Ready condition only because it is a special + // condition in kstatus that reflects the overall state of an object. + // IMPLEMENTATION NOTE: An implementation of this within the + // conditions.merge() exists in fluxcd/pkg repo branch `bipolarity` + // (https://github.com/fluxcd/pkg/commit/756b9e6d253a4fae93c05419b7019d0169454858). + // If that gets added to conditions.merge, the following can be removed. + var failedBiPolarity []string + for _, c := range opts.BiPolarityConditionTypes { + if conditions.IsFalse(obj, c) { + failedBiPolarity = append(failedBiPolarity, c) + } + } + if len(failedBiPolarity) > 0 { + topFailedBiPolarity := conditions.Get(obj, failedBiPolarity[0]) + conditions.MarkFalse(obj, meta.ReadyCondition, topFailedBiPolarity.Reason, "%s", topFailedBiPolarity.Message) + } + // If object is not stalled, result is success and runtime error is nil, // ensure that Ready=True. Else, use the Ready failure message as the // runtime error message. This ensures that the reconciliation would be @@ -219,7 +250,7 @@ func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, o } // Finally, patch the resource. - if err := h.patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + if err := h.serialPatcher.Patch(ctx, obj, patchOpts...); err != nil { // Ignore patch error "not found" when the object is being deleted. 
if opts.IgnoreNotFound && !obj.GetDeletionTimestamp().IsZero() { err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) diff --git a/internal/reconcile/summarize/summary_test.go b/internal/reconcile/summarize/summary_test.go index b16d19e37..c4c16e4eb 100644 --- a/internal/reconcile/summarize/summary_test.go +++ b/internal/reconcile/summarize/summary_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - "github.com/darkowlzz/controller-check/status" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -34,9 +33,10 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/reconcile" ) @@ -44,11 +44,16 @@ import ( // This tests the scenario where SummarizeAndPatch is used at the very end of a // reconciliation. 
func TestSummarizeAndPatch(t *testing.T) { + testBipolarCondition1 := "FooChecked1" + testBipolarCondition2 := "FooChecked2" var testReadyConditions = Conditions{ Target: meta.ReadyCondition, Owned: []string{ sourcev1.FetchFailedCondition, sourcev1.ArtifactOutdatedCondition, + sourcev1.SourceVerifiedCondition, + testBipolarCondition1, + testBipolarCondition2, meta.ReadyCondition, meta.ReconcilingCondition, meta.StalledCondition, @@ -56,6 +61,9 @@ func TestSummarizeAndPatch(t *testing.T) { Summarize: []string{ sourcev1.FetchFailedCondition, sourcev1.ArtifactOutdatedCondition, + sourcev1.SourceVerifiedCondition, + testBipolarCondition1, + testBipolarCondition2, meta.StalledCondition, meta.ReconcilingCondition, }, @@ -66,6 +74,7 @@ func TestSummarizeAndPatch(t *testing.T) { meta.ReconcilingCondition, }, } + var testBipolarConditions = []string{sourcev1.SourceVerifiedCondition, testBipolarCondition1, testBipolarCondition2} var testFooConditions = Conditions{ Target: "Foo", Owned: []string{ @@ -83,15 +92,16 @@ func TestSummarizeAndPatch(t *testing.T) { } tests := []struct { - name string - generation int64 - beforeFunc func(obj conditions.Setter) - result reconcile.Result - reconcileErr error - conditions []Conditions - wantErr bool - afterFunc func(t *WithT, obj client.Object) - assertConditions []metav1.Condition + name string + generation int64 + beforeFunc func(obj conditions.Setter) + result reconcile.Result + reconcileErr error + conditions []Conditions + bipolarConditions []string + wantErr bool + afterFunc func(t *WithT, obj client.Object) + assertConditions []metav1.Condition }{ // Success/Fail indicates if a reconciliation succeeded or failed. 
// The object generation is expected to match the observed generation in @@ -118,7 +128,7 @@ func TestSummarizeAndPatch(t *testing.T) { name: "Success, removes reconciling for successful result", generation: 2, beforeFunc: func(obj conditions.Setter) { - conditions.MarkReconciling(obj, "NewRevision", "new index version") + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index version") conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") }, conditions: []Conditions{testReadyConditions}, @@ -157,7 +167,7 @@ func TestSummarizeAndPatch(t *testing.T) { generation: 7, beforeFunc: func(obj conditions.Setter) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") - conditions.MarkReconciling(obj, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index revision") }, conditions: []Conditions{testReadyConditions}, reconcileErr: fmt.Errorf("failed to create dir"), @@ -165,7 +175,7 @@ func TestSummarizeAndPatch(t *testing.T) { assertConditions: []metav1.Condition{ *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), - *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new index revision"), }, afterFunc: func(t *WithT, obj client.Object) { t.Expect(obj).ToNot(HaveStatusObservedGeneration(7)) @@ -250,6 +260,64 @@ func TestSummarizeAndPatch(t *testing.T) { }, wantErr: true, }, + { + name: "Fail, reconciling with bipolar condition False, Ready gets bipolar failure value", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index revision") + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, 
"VerifyFailed", "verify failed") + }, + result: reconcile.ResultEmpty, + reconcileErr: errors.New("failed to verify source"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "VerifyFailed", "verify failed"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerifyFailed", "verify failed"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new index revision"), + }, + }, + { + name: "Fail, bipolar condition True, negative polarity True, Ready gets negative polarity value", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "new obj gen") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Success", "verified") + }, + result: reconcile.ResultEmpty, + reconcileErr: errors.New("failed to create dir"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new digest"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new obj gen"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Success", "verified"), + }, + }, + { + name: "Fail, multiple bipolar conditions False, Ready gets the bipolar with high priority", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Success", "verified") + conditions.MarkFalse(obj, testBipolarCondition1, "AAA", "aaa") + conditions.MarkFalse(obj, testBipolarCondition2, "BBB", "bbb") + }, + result: 
reconcile.ResultEmpty, + reconcileErr: errors.New("some failure"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "AAA", "aaa"), + *conditions.FalseCondition(testBipolarCondition1, "AAA", "aaa"), + *conditions.FalseCondition(testBipolarCondition2, "BBB", "bbb"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Success", "verified"), + }, + }, } for _, tt := range tests { @@ -257,10 +325,8 @@ func TestSummarizeAndPatch(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() - g.Expect(sourcev1.AddToScheme(scheme)) + g.Expect(sourcev1.AddToScheme(scheme)).To(Succeed()) - builder := fakeclient.NewClientBuilder().WithScheme(scheme) - client := builder.Build() obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", @@ -275,22 +341,31 @@ func TestSummarizeAndPatch(t *testing.T) { tt.beforeFunc(obj) } + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(&sourcev1.GitRepository{}). 
+ Build() + ctx := context.TODO() - g.Expect(client.Create(ctx, obj)).To(Succeed()) - patchHelper, err := patch.NewHelper(obj, client) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(c.Create(ctx, obj)).To(Succeed()) - summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + serialPatcher := patch.NewSerialPatcher(obj, c) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), serialPatcher) summaryOpts := []Option{ WithReconcileResult(tt.result), WithReconcileError(tt.reconcileErr), WithConditions(tt.conditions...), WithIgnoreNotFound(), - WithProcessors(RecordContextualError, RecordReconcileReq), + WithProcessors(ErrorActionHandler, RecordReconcileReq), WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}), } + if tt.bipolarConditions != nil { + summaryOpts = append(summaryOpts, WithBiPolarityConditionTypes(tt.bipolarConditions...)) + } + _, gotErr := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) - g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + g.Expect(gotErr != nil).To(Equal(tt.wantErr), "SummarizeAndPatch() wantErr = %v, gotErr = %v", tt.wantErr, gotErr) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) @@ -299,9 +374,9 @@ func TestSummarizeAndPatch(t *testing.T) { } // Check if the object status is valid as per kstatus. - condns := &status.Conditions{NegativePolarity: testReadyConditions.NegativePolarity} - checker := status.NewChecker(client, condns) - checker.CheckErr(ctx, obj) + condns := &conditionscheck.Conditions{NegativePolarity: testReadyConditions.NegativePolarity} + checker := conditionscheck.NewChecker(c, condns) + checker.WithT(g).CheckErr(ctx, obj) }) } } @@ -377,8 +452,10 @@ func TestSummarizeAndPatch_Intermediate(t *testing.T) { scheme := runtime.NewScheme() g.Expect(sourcev1.AddToScheme(scheme)) - builder := fakeclient.NewClientBuilder().WithScheme(scheme) - kclient := builder.Build() + c := fakeclient.NewClientBuilder(). 
+ WithScheme(scheme). + WithStatusSubresource(&sourcev1.GitRepository{}). + Build() obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ @@ -399,16 +476,15 @@ func TestSummarizeAndPatch_Intermediate(t *testing.T) { } ctx := context.TODO() - g.Expect(kclient.Create(ctx, obj)).To(Succeed()) - patchHelper, err := patch.NewHelper(obj, kclient) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(c.Create(ctx, obj)).To(Succeed()) + serialPatcher := patch.NewSerialPatcher(obj, c) - summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + summaryHelper := NewHelper(record.NewFakeRecorder(32), serialPatcher) summaryOpts := []Option{ WithConditions(tt.conditions...), WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}), } - _, err = summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + _, err := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) diff --git a/internal/transport/transport.go b/internal/transport/transport.go deleted file mode 100644 index 89286df71..000000000 --- a/internal/transport/transport.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "sync" - "time" -) - -// TransportPool is a progressive and non-blocking pool -// for http.Transport objects, optimised for Gargabe Collection -// and without a hard limit on number of objects created. -// -// Its main purpose is to enable for transport objects to be -// used across helm chart download requests and helm/pkg/getter -// instances by leveraging the getter.WithTransport(t) construct. -// -// The use of this pool improves the default behaviour of helm getter -// which creates a new connection per request, or per getter instance, -// resulting on unnecessary TCP connections with the target. -// -// http.Transport objects may contain sensitive material and also have -// settings that may impact the security of HTTP operations using -// them (i.e. InsecureSkipVerify). Therefore, ensure that they are -// used in a thread-safe way, and also by reseting TLS specific state -// after each use. -// -// Calling the Release(t) function will reset TLS specific state whilst -// also releasing the transport back to the pool to be reused. -// -// xref: https://github.com/helm/helm/pull/10568 -// xref2: https://github.com/fluxcd/source-controller/issues/578 -type TransportPool struct { -} - -var pool = &sync.Pool{ - New: func() interface{} { - return &http.Transport{ - DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - - // Due to the non blocking nature of this approach, - // at peak usage a higher number of transport objects - // may be created. sync.Pool will ensure they are - // gargage collected when/if needed. - // - // By setting a low value to IdleConnTimeout the connections - // will be closed after that period of inactivity, allowing the - // transport to be garbage collected. 
- IdleConnTimeout: 60 * time.Second, - - // use safe defaults based off http.DefaultTransport - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - }, -} - -// NewOrIdle tries to return an existing transport that is not currently being used. -// If none is found, creates a new Transport instead. -// -// tlsConfig can optionally set the TLSClientConfig for the transport. -func NewOrIdle(tlsConfig *tls.Config) *http.Transport { - t := pool.Get().(*http.Transport) - t.TLSClientConfig = tlsConfig - - return t -} - -// Release releases the transport back to the TransportPool after -// sanitising its sensitive fields. -func Release(transport *http.Transport) error { - if transport == nil { - return fmt.Errorf("cannot release nil transport") - } - - transport.TLSClientConfig = nil - - pool.Put(transport) - return nil -} diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go deleted file mode 100644 index f0bc387d6..000000000 --- a/internal/transport/transport_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package transport - -import ( - "crypto/tls" - "testing" -) - -func Test_TransportReuse(t *testing.T) { - t1 := NewOrIdle(nil) - t2 := NewOrIdle(nil) - - if t1 == t2 { - t.Errorf("same transported returned twice") - } - - err := Release(t2) - if err != nil { - t.Errorf("error releasing transport t2: %v", err) - } - - t3 := NewOrIdle(&tls.Config{ - ServerName: "testing", - }) - if t3.TLSClientConfig == nil || t3.TLSClientConfig.ServerName != "testing" { - t.Errorf("TLSClientConfig not properly configured") - } - - err = Release(t3) - if err != nil { - t.Errorf("error releasing transport t3: %v", err) - } - if t3.TLSClientConfig != nil { - t.Errorf("TLSClientConfig not cleared after release") - } - - err = Release(nil) - if err == nil { - t.Errorf("should not allow release nil transport") - } else if err.Error() != "cannot release nil transport" { - t.Errorf("wanted error message: 'cannot release nil transport' got: %q", err.Error()) - } -} diff --git a/main.go b/main.go index 621cea36c..cb019e6e4 100644 --- a/main.go +++ b/main.go @@ -18,39 +18,52 @@ package main import ( "fmt" - "net" - "net/http" "os" - "path/filepath" "time" - "github.com/go-logr/logr" flag "github.com/spf13/pflag" "helm.sh/helm/v3/pkg/getter" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + artcfg "github.com/fluxcd/pkg/artifact/config" + artdigest 
"github.com/fluxcd/pkg/artifact/digest" + artsrv "github.com/fluxcd/pkg/artifact/server" + artstore "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + pkgcache "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/client" helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" feathelper "github.com/fluxcd/pkg/runtime/features" + "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" + "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" - "github.com/fluxcd/source-controller/internal/features" - "github.com/fluxcd/source-controller/internal/helm/registry" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/fluxcd/source-controller/controllers" + sourcev1 "github.com/fluxcd/source-controller/api/v1" + + // +kubebuilder:scaffold:imports + "github.com/fluxcd/source-controller/internal/cache" + "github.com/fluxcd/source-controller/internal/controller" + "github.com/fluxcd/source-controller/internal/features" "github.com/fluxcd/source-controller/internal/helm" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" - // +kubebuilder:scaffold:imports + "github.com/fluxcd/source-controller/internal/helm/registry" ) const controllerName = "source-controller" @@ -78,29 +91,32 @@ func init() { } func main() { + const ( + tokenCacheDefaultMaxSize = 100 + ) + var ( - metricsAddr string - 
eventsAddr string - healthAddr string - storagePath string - storageAddr string - storageAdvAddr string - concurrent int - requeueDependency time.Duration - watchAllNamespaces bool - helmIndexLimit int64 - helmChartLimit int64 - helmChartFileLimit int64 - clientOptions client.Options - logOptions logger.Options - leaderElectionOptions leaderelection.Options - rateLimiterOptions helper.RateLimiterOptions - featureGates feathelper.FeatureGates - helmCacheMaxSize int - helmCacheTTL string - helmCachePurgeInterval string - artifactRetentionTTL time.Duration - artifactRetentionRecords int + metricsAddr string + eventsAddr string + healthAddr string + concurrent int + requeueDependency time.Duration + helmIndexLimit int64 + helmChartLimit int64 + helmChartFileLimit int64 + artifactOptions artcfg.Options + clientOptions client.Options + logOptions logger.Options + leaderElectionOptions leaderelection.Options + rateLimiterOptions helper.RateLimiterOptions + featureGates feathelper.FeatureGates + watchOptions helper.WatchOptions + intervalJitterOptions jitter.IntervalOptions + helmCacheMaxSize int + helmCacheTTL string + helmCachePurgeInterval string + tokenCacheOptions pkgcache.TokenFlags + defaultServiceAccount string ) flag.StringVar(&metricsAddr, "metrics-addr", envOrDefault("METRICS_ADDR", ":8080"), @@ -108,15 +124,7 @@ func main() { flag.StringVar(&eventsAddr, "events-addr", envOrDefault("EVENTS_ADDR", ""), "The address of the events receiver.") flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") - flag.StringVar(&storagePath, "storage-path", envOrDefault("STORAGE_PATH", ""), - "The local storage path.") - flag.StringVar(&storageAddr, "storage-addr", envOrDefault("STORAGE_ADDR", ":9090"), - "The address the static file server binds to.") - flag.StringVar(&storageAdvAddr, "storage-adv-addr", envOrDefault("STORAGE_ADV_ADDR", ""), - "The advertised address of the static file server.") flag.IntVar(&concurrent, "concurrent", 2, 
"The number of concurrent reconciles per controller.") - flag.BoolVar(&watchAllNamespaces, "watch-all-namespaces", true, - "Watch for custom resources in all namespaces, if set to false it will only watch the runtime namespace.") flag.Int64Var(&helmIndexLimit, "helm-index-max-size", helm.MaxIndexSize, "The max allowed size in bytes of a Helm repository index file.") flag.Int64Var(&helmChartLimit, "helm-chart-max-size", helm.MaxChartSize, @@ -135,191 +143,164 @@ func main() { "The list of key exchange algorithms to use for ssh connections, arranged from most preferred to the least.") flag.StringSliceVar(&git.HostKeyAlgos, "ssh-hostkey-algos", []string{}, "The list of hostkey algorithms to use for ssh connections, arranged from most preferred to the least.") - flag.DurationVar(&artifactRetentionTTL, "artifact-retention-ttl", 60*time.Second, - "The duration of time that artifacts will be kept in storage before being garbage collected.") - flag.IntVar(&artifactRetentionRecords, "artifact-retention-records", 2, - "The maximum number of artifacts to be kept in storage after a garbage collection.") + flag.StringVar(&defaultServiceAccount, auth.ControllerFlagDefaultServiceAccount, + "", "Default service account to use for workload identity when not specified in resources.") + artifactOptions.BindFlags(flag.CommandLine) clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) rateLimiterOptions.BindFlags(flag.CommandLine) featureGates.BindFlags(flag.CommandLine) + watchOptions.BindFlags(flag.CommandLine) + intervalJitterOptions.BindFlags(flag.CommandLine) + tokenCacheOptions.BindFlags(flag.CommandLine, tokenCacheDefaultMaxSize) flag.Parse() - ctrl.SetLogger(logger.NewLogger(logOptions)) + logger.SetLogger(logger.NewLogger(logOptions)) - err := featureGates.WithLogger(setupLog). 
- SupportedFeatures(features.FeatureGates()) + if defaultServiceAccount != "" { + auth.SetDefaultServiceAccount(defaultServiceAccount) + } - if err != nil { + if err := featureGates.WithLogger(setupLog).SupportedFeatures(features.FeatureGates()); err != nil { setupLog.Error(err, "unable to load feature gates") os.Exit(1) } - // Set upper bound file size limits Helm - helm.MaxIndexSize = helmIndexLimit - helm.MaxChartSize = helmChartLimit - helm.MaxChartFileSize = helmChartFileLimit - - watchNamespace := "" - if !watchAllNamespaces { - watchNamespace = os.Getenv("RUNTIME_NAMESPACE") + switch enabled, err := features.Enabled(auth.FeatureGateObjectLevelWorkloadIdentity); { + case err != nil: + setupLog.Error(err, "unable to check feature gate "+auth.FeatureGateObjectLevelWorkloadIdentity) + os.Exit(1) + case enabled: + auth.EnableObjectLevelWorkloadIdentity() } - restConfig := client.GetConfigOrDie(clientOptions) - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - HealthProbeBindAddress: healthAddr, - Port: 9443, - LeaderElection: leaderElectionOptions.Enable, - LeaderElectionReleaseOnCancel: leaderElectionOptions.ReleaseOnCancel, - LeaseDuration: &leaderElectionOptions.LeaseDuration, - RenewDeadline: &leaderElectionOptions.RenewDeadline, - RetryPeriod: &leaderElectionOptions.RetryPeriod, - LeaderElectionID: fmt.Sprintf("%s-leader-election", controllerName), - Namespace: watchNamespace, - Logger: ctrl.Log, - }) - if err != nil { - setupLog.Error(err, "unable to start manager") + if auth.InconsistentObjectLevelConfiguration() { + setupLog.Error(auth.ErrInconsistentObjectLevelConfiguration, "invalid configuration") os.Exit(1) } - probes.SetupChecks(mgr, setupLog) - pprof.SetupHandlers(mgr, setupLog) - - var eventRecorder *events.Recorder - if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") + if err := 
intervalJitterOptions.SetGlobalJitter(nil); err != nil { + setupLog.Error(err, "unable to set global jitter") os.Exit(1) } - metricsH := helper.MustMakeMetrics(mgr) + mgr := mustSetupManager(metricsAddr, healthAddr, concurrent, watchOptions, clientOptions, leaderElectionOptions) - if storageAdvAddr == "" { - storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) - } - storage := mustInitStorage(storagePath, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords, setupLog) + probes.SetupChecks(mgr, setupLog) - if err = managed.InitManagedTransport(); err != nil { - // Log the error, but don't exit so as to not block reconcilers that are healthy. - setupLog.Error(err, "unable to initialize libgit2 managed transport") - } + metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) + cacheRecorder := cache.MustMakeMetrics() + eventRecorder := mustSetupEventRecorder(mgr, eventsAddr, controllerName) - if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, - Metrics: metricsH, - Storage: storage, - ControllerName: controllerName, - Libgit2TransportInitialized: managed.Enabled, - }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - DependencyRequeueInterval: requeueDependency, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), - }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", sourcev1.GitRepositoryKind) + algo, err := artdigest.AlgorithmForName(artifactOptions.ArtifactDigestAlgo) + if err != nil { + setupLog.Error(err, "unable to configure canonical digest algorithm") os.Exit(1) } + artdigest.Canonical = algo - if err = (&controllers.HelmRepositoryOCIReconciler{ - Client: mgr.GetClient(), - EventRecorder: eventRecorder, - Metrics: metricsH, - Getters: getters, - ControllerName: controllerName, - RegistryClientGenerator: registry.ClientGenerator, - 
}).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), - }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind, "type", "OCI") + storage, err := artstore.New(&artifactOptions) + if err != nil { + setupLog.Error(err, "unable to configure artifact storage") os.Exit(1) } - var c *cache.Cache - var ttl time.Duration - if helmCacheMaxSize > 0 { - interval, err := time.ParseDuration(helmCachePurgeInterval) - if err != nil { - setupLog.Error(err, "unable to parse cache purge interval") - os.Exit(1) - } + mustSetupHelmLimits(helmIndexLimit, helmChartLimit, helmChartFileLimit) + helmIndexCache, helmIndexCacheItemTTL := mustInitHelmCache(helmCacheMaxSize, helmCacheTTL, helmCachePurgeInterval) - ttl, err = time.ParseDuration(helmCacheTTL) + var tokenCache *pkgcache.TokenCache + if tokenCacheOptions.MaxSize > 0 { + var err error + tokenCache, err = pkgcache.NewTokenCache(tokenCacheOptions.MaxSize, + pkgcache.WithMaxDuration(tokenCacheOptions.MaxDuration), + pkgcache.WithMetricsRegisterer(ctrlmetrics.Registry), + pkgcache.WithMetricsPrefix("gotk_token_")) if err != nil { - setupLog.Error(err, "unable to parse cache TTL") + setupLog.Error(err, "unable to create token cache") os.Exit(1) } - - c = cache.New(helmCacheMaxSize, interval) } - cacheRecorder := cache.MustMakeMetrics() + ctx := ctrl.SetupSignalHandler() + + if err := (&controller.GitRepositoryReconciler{ + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metrics, + Storage: storage, + ControllerName: controllerName, + TokenCache: tokenCache, + }).SetupWithManagerAndOptions(mgr, controller.GitRepositoryReconcilerOptions{ + DependencyRequeueInterval: requeueDependency, + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", 
sourcev1.GitRepositoryKind) + os.Exit(1) + } - if err = (&controllers.HelmRepositoryReconciler{ + if err := (&controller.HelmRepositoryReconciler{ Client: mgr.GetClient(), EventRecorder: eventRecorder, - Metrics: metricsH, + Metrics: metrics, Storage: storage, Getters: getters, ControllerName: controllerName, - Cache: c, - TTL: ttl, + Cache: helmIndexCache, + TTL: helmIndexCacheItemTTL, CacheRecorder: cacheRecorder, - }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + }).SetupWithManagerAndOptions(mgr, controller.HelmRepositoryReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind) os.Exit(1) } - if err = (&controllers.HelmChartReconciler{ + if err := (&controller.HelmChartReconciler{ Client: mgr.GetClient(), RegistryClientGenerator: registry.ClientGenerator, Storage: storage, Getters: getters, EventRecorder: eventRecorder, - Metrics: metricsH, + Metrics: metrics, ControllerName: controllerName, - Cache: c, - TTL: ttl, + Cache: helmIndexCache, + TTL: helmIndexCacheItemTTL, CacheRecorder: cacheRecorder, - }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + }).SetupWithManagerAndOptions(ctx, mgr, controller.HelmChartReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind) os.Exit(1) } - if err = (&controllers.BucketReconciler{ + + if err := (&controller.BucketReconciler{ Client: mgr.GetClient(), EventRecorder: eventRecorder, - Metrics: metricsH, + Metrics: metrics, Storage: storage, ControllerName: controllerName, - }).SetupWithManagerAndOptions(mgr, 
controllers.BucketReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + TokenCache: tokenCache, + }).SetupWithManagerAndOptions(mgr, controller.BucketReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bucket") + setupLog.Error(err, "unable to create controller", "controller", sourcev1.BucketKind) os.Exit(1) } - if err = (&controllers.OCIRepositoryReconciler{ + + if err := (&controller.OCIRepositoryReconciler{ Client: mgr.GetClient(), Storage: storage, EventRecorder: eventRecorder, ControllerName: controllerName, - Metrics: metricsH, - }).SetupWithManagerAndOptions(mgr, controllers.OCIRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, - RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + TokenCache: tokenCache, + Metrics: metrics, + }).SetupWithManagerAndOptions(mgr, controller.OCIRepositoryReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "OCIRepository") + setupLog.Error(err, "unable to create controller", "controller", sourcev1.OCIRepositoryKind) os.Exit(1) } // +kubebuilder:scaffold:builder @@ -330,63 +311,132 @@ func main() { // to handle that. <-mgr.Elected() - startFileServer(storage.BasePath, storageAddr, setupLog) + // Start the artifact server if running as leader. 
+ if err := artsrv.Start(ctx, &artifactOptions); err != nil { + setupLog.Error(err, "artifact server error") + os.Exit(1) + } }() setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } } -func startFileServer(path string, address string, l logr.Logger) { - l.Info("starting file server") - fs := http.FileServer(http.Dir(path)) - http.Handle("/", fs) - err := http.ListenAndServe(address, nil) +func mustSetupEventRecorder(mgr ctrl.Manager, eventsAddr, controllerName string) record.EventRecorder { + eventRecorder, err := events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName) if err != nil { - l.Error(err, "file server error") + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) } + return eventRecorder } -func mustInitStorage(path string, storageAdvAddr string, artifactRetentionTTL time.Duration, artifactRetentionRecords int, l logr.Logger) *controllers.Storage { - if path == "" { - p, _ := os.Getwd() - path = filepath.Join(p, "bin") - os.MkdirAll(path, 0o700) +func mustSetupManager(metricsAddr, healthAddr string, maxConcurrent int, + watchOpts helper.WatchOptions, clientOpts client.Options, leaderOpts leaderelection.Options) ctrl.Manager { + + watchNamespace := "" + if !watchOpts.AllNamespaces { + watchNamespace = os.Getenv("RUNTIME_NAMESPACE") + } + + watchSelector, err := helper.GetWatchSelector(watchOpts) + if err != nil { + setupLog.Error(err, "unable to configure watch label selector for manager") + os.Exit(1) } - storage, err := controllers.NewStorage(path, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords) + var disableCacheFor []ctrlclient.Object + shouldCache, err := features.Enabled(features.CacheSecretsAndConfigMaps) if err != nil { - l.Error(err, "unable to initialise storage") + setupLog.Error(err, "unable to check feature gate "+features.CacheSecretsAndConfigMaps) os.Exit(1) } 
+ if !shouldCache { + disableCacheFor = append(disableCacheFor, &corev1.Secret{}, &corev1.ConfigMap{}) + } - return storage + leaderElectionId := fmt.Sprintf("%s-%s", controllerName, "leader-election") + if watchOpts.LabelSelector != "" { + leaderElectionId = leaderelection.GenerateID(leaderElectionId, watchOpts.LabelSelector) + } + + restConfig := client.GetConfigOrDie(clientOpts) + mgrConfig := ctrl.Options{ + Scheme: scheme, + HealthProbeBindAddress: healthAddr, + LeaderElection: leaderOpts.Enable, + LeaderElectionReleaseOnCancel: leaderOpts.ReleaseOnCancel, + LeaseDuration: &leaderOpts.LeaseDuration, + RenewDeadline: &leaderOpts.RenewDeadline, + RetryPeriod: &leaderOpts.RetryPeriod, + LeaderElectionID: leaderElectionId, + Logger: ctrl.Log, + Client: ctrlclient.Options{ + Cache: &ctrlclient.CacheOptions{ + DisableFor: disableCacheFor, + }, + }, + Cache: ctrlcache.Options{ + ByObject: map[ctrlclient.Object]ctrlcache.ByObject{ + &sourcev1.GitRepository{}: {Label: watchSelector}, + &sourcev1.HelmRepository{}: {Label: watchSelector}, + &sourcev1.HelmChart{}: {Label: watchSelector}, + &sourcev1.Bucket{}: {Label: watchSelector}, + &sourcev1.OCIRepository{}: {Label: watchSelector}, + }, + }, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + ExtraHandlers: pprof.GetHandlers(), + }, + Controller: ctrlcfg.Controller{ + RecoverPanic: ptr.To(true), + MaxConcurrentReconciles: maxConcurrent, + }, + } + + if watchNamespace != "" { + mgrConfig.Cache.DefaultNamespaces = map[string]ctrlcache.Config{ + watchNamespace: ctrlcache.Config{}, + } + } + + mgr, err := ctrl.NewManager(restConfig, mgrConfig) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + return mgr } -func determineAdvStorageAddr(storageAddr string, l logr.Logger) string { - host, port, err := net.SplitHostPort(storageAddr) +func mustSetupHelmLimits(indexLimit, chartLimit, chartFileLimit int64) { + helm.MaxIndexSize = indexLimit + helm.MaxChartSize = chartLimit + 
helm.MaxChartFileSize = chartFileLimit +} + +func mustInitHelmCache(maxSize int, itemTTL, purgeInterval string) (*cache.Cache, time.Duration) { + if maxSize <= 0 { + setupLog.Info("caching of Helm index files is disabled") + return nil, -1 + } + + interval, err := time.ParseDuration(purgeInterval) if err != nil { - l.Error(err, "unable to parse storage address") + setupLog.Error(err, "unable to parse Helm index cache purge interval") os.Exit(1) } - switch host { - case "": - host = "localhost" - case "0.0.0.0": - host = os.Getenv("HOSTNAME") - if host == "" { - hn, err := os.Hostname() - if err != nil { - l.Error(err, "0.0.0.0 specified in storage addr but hostname is invalid") - os.Exit(1) - } - host = hn - } + + ttl, err := time.ParseDuration(itemTTL) + if err != nil { + setupLog.Error(err, "unable to parse Helm index cache item TTL") + os.Exit(1) } - return net.JoinHostPort(host, port) + + return cache.New(maxSize, interval), ttl } func envOrDefault(envName, defaultValue string) string { diff --git a/pkg/git/git.go b/pkg/git/git.go deleted file mode 100644 index 5ce6fb09a..000000000 --- a/pkg/git/git.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package git - -import ( - "bytes" - "context" - "fmt" - "strings" - "time" - - "github.com/ProtonMail/go-crypto/openpgp" -) - -type Implementation string - -type Hash []byte - -// String returns the SHA1 Hash as a string. 
-func (h Hash) String() string { - return string(h) -} - -type Signature struct { - Name string - Email string - When time.Time -} - -type Commit struct { - // Hash is the SHA1 hash of the commit. - Hash Hash - // Reference is the original reference of the commit, for example: - // 'refs/tags/foo'. - Reference string - // Author is the original author of the commit. - Author Signature - // Committer is the one performing the commit, might be different from - // Author. - Committer Signature - // Signature is the PGP signature of the commit. - Signature string - // Encoded is the encoded commit, without any signature. - Encoded []byte - // Message is the commit message, contains arbitrary text. - Message string -} - -// String returns a string representation of the Commit, composed -// out the last part of the Reference element, and/or Hash. -// For example: 'tag-1/a0c14dc8580a23f79bc654faa79c4f62b46c2c22', -// for a "tag-1" tag. -func (c *Commit) String() string { - if short := strings.SplitAfterN(c.Reference, "/", 3); len(short) == 3 { - return fmt.Sprintf("%s/%s", short[2], c.Hash) - } - return fmt.Sprintf("HEAD/%s", c.Hash) -} - -// Verify the Signature of the commit with the given key rings. -// It returns the fingerprint of the key the signature was verified -// with, or an error. 
-func (c *Commit) Verify(keyRing ...string) (string, error) { - if c.Signature == "" { - return "", fmt.Errorf("commit does not have a PGP signature") - } - - for _, r := range keyRing { - reader := strings.NewReader(r) - keyring, err := openpgp.ReadArmoredKeyRing(reader) - if err != nil { - return "", fmt.Errorf("failed to read armored key ring: %w", err) - } - signer, err := openpgp.CheckArmoredDetachedSignature(keyring, bytes.NewBuffer(c.Encoded), bytes.NewBufferString(c.Signature), nil) - if err == nil { - return fmt.Sprintf("%X", signer.PrimaryKey.Fingerprint[12:20]), nil - } - } - return "", fmt.Errorf("failed to verify commit with any of the given key rings") -} - -// ShortMessage returns the first 50 characters of a commit subject. -func (c *Commit) ShortMessage() string { - subject := strings.Split(c.Message, "\n")[0] - r := []rune(subject) - if len(r) > 50 { - return fmt.Sprintf("%s...", string(r[0:50])) - } - return subject -} - -type CheckoutStrategy interface { - Checkout(ctx context.Context, path, url string, config *AuthOptions) (*Commit, error) -} - -// IsConcreteCommit returns if a given commit is a concrete commit. Concrete -// commits have most of commit metadata and commit content. In contrast, a -// partial commit may only have some metadata and no commit content. -func IsConcreteCommit(c Commit) bool { - if c.Hash != nil && c.Encoded != nil { - return true - } - return false -} diff --git a/pkg/git/git_test.go b/pkg/git/git_test.go deleted file mode 100644 index 5b67b23bd..000000000 --- a/pkg/git/git_test.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package git - -import ( - "testing" - "time" - - . "github.com/onsi/gomega" -) - -const ( - encodedCommitFixture = `tree f0c522d8cc4c90b73e2bc719305a896e7e3c108a -parent eb167bc68d0a11530923b1f24b4978535d10b879 -author Stefan Prodan 1633681364 +0300 -committer Stefan Prodan 1633681364 +0300 - -Update containerd and runc to fix CVEs - -Signed-off-by: Stefan Prodan -` - - malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 -author Stefan Prodan 1633681364 +0300 -committer Stefan Prodan 1633681364 +0300 - -Update containerd and runc to fix CVEs - -Signed-off-by: Stefan Prodan -` - - signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- - -iHUEABEIAB0WIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCYV//1AAKCRAyma6w5Ahb -r7nJAQCQU4zEJu04/Q0ac/UaL6htjhq/wTDNMeUM+aWG/LcBogEAqFUea1oR2BJQ -JCJmEtERFh39zNWSazQmxPAFhEE0kbc= -=+Wlj ------END PGP SIGNATURE-----` - - armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8 -mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths -TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ -rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K -Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT -C05OtQOiDXjSpkLim81vNVPtI2XEug+9fEA+jeJakyGwwB+K8xqV3QILKCoWHKGx -yWcMHSR6cP9tdXCk2JHZBm1PLSJ8hIgMH/YwBJLYg90u8lLAs9WtpVBKkLplzzgm -B4Z4VxCC+xI1kt+3ZgYvYC+oUXJXrjyAzy+J1f+aWl2+S/79glWgl/xz2VibWMz6 -nZUE+wLMxOQqyOsBALsoE6z81y/7gfn4R/BziBASi1jq/r/wdboFYowmqd39DACX 
-+i+V0OplP2TN/F5JajzRgkrlq5cwZHinnw+IFwj9RTfOkdGb3YwhBt/h2PP38969 -ZG+y8muNtaIqih1pXj1fz9HRtsiCABN0j+JYpvV2D2xuLL7P1O0dt5BpJ3KqNCRw -mGgO2GLxbwvlulsLidCPxdK/M8g9Eeb/xwA5LVwvjVchHkzHuUT7durn7AT0RWiK -BT8iDfeBB9RKienAbWyybEqRaR6/Tv+mghFIalsDiBPbfm4rsNzsq3ohfByqECiy -yUvs2O3NDwkoaBDkA3GFyKv8/SVpcuL5OkVxAHNCIMhNzSgotQ3KLcQc0IREfFCa -3CsBAC7CsE2bJZ9IA9sbBa3jimVhWUQVudRWiLFeYHUF/hjhqS8IHyFwprjEOLaV -EG0kBO6ELypD/bOsmN9XZLPYyI3y9DM6Vo0KMomE+yK/By/ZMxVfex8/TZreUdhP -VdCLL95Rc4w9io8qFb2qGtYBij2wm0RWLcM0IhXWAtjI3B17IN+6hmv+JpiZccsM -AMNR5/RVdXIl0hzr8LROD0Xe4sTyZ+fm3mvpczoDPQNRrWpmI/9OT58itnVmZ5jM -7djV5y/NjBk63mlqYYfkfWto97wkhg0MnTnOhzdtzSiZQRzj+vf+ilLfIlLnuRr1 -JRV9Skv6xQltcFArx4JyfZCo7JB1ZXcbdFAvIXXS11RTErO0XVrXNm2RenpW/yZA -9f+ESQ/uUB6XNuyqVUnJDAFJFLdzx8sO3DXo7dhIlgpFqgQobUl+APpbU5LT95sm -89UrV0Lt9vh7k6zQtKOjEUhm+dErmuBnJo8MvchAuXLagHjvb58vYBCUxVxzt1KG -2IePwJ/oXIfawNEGad9Lmdo1FYG1u53AKWZmpYOTouu92O50FG2+7dBh0V2vO253 -aIGFRT1r14B1pkCIun7z7B/JELqOkmwmlRrUnxlADZEcQT3z/S8/4+2P7P6kXO7X -/TAX5xBhSqUbKe3DhJSOvf05/RVL5ULc2U2JFGLAtmBOFmnD/u0qoo5UvWliI+v/ -47QnU3RlZmFuIFByb2RhbiA8c3RlZmFuLnByb2RhbkBnbWFpbC5jb20+iJAEExEI -ADgWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34eAwIbAwULCQgHAgYVCgkICwIE -FgIDAQIeAQIXgAAKCRAyma6w5Ahbrzu/AP9l2YpRaWZr6wSQuEn0gMN8DRzsWJPx -pn0akdY7SRP3ngD9GoKgu41FAItnHAJ2KiHv/fHFyHMndNP3kPGPNW4BF+65Aw0E -X34eAxAMAMdYFCHmVA8TZxSTMBDpKYave8RiDCMMMjk26Gl0EPN9f2Y+s5++DhiQ -hojNH9VmJkFwZX1xppxe1y1aLa/U6fBAqMP/IdNH8270iv+A9YIxdsWLmpm99BDO -3suRfsHcOe9T0x/CwRfDNdGM/enGMhYGTgF4VD58DRDE6WntaBhl4JJa300NG6X0 -GM4Gh59DKWDnez/Shulj8demlWmakP5imCVoY+omOEc2k3nH02U+foqaGG5WxZZ+ -GwEPswm2sBxvn8nwjy9gbQwEtzNI7lWYiz36wCj2VS56Udqt+0eNg8WzocUT0XyI -moe1qm8YJQ6fxIzaC431DYi/mCDzgx4EV9ww33SXX3Yp2NL6PsdWJWw2QnoqSMpM -z5otw2KlMgUHkkXEKs0apmK4Hu2b6KD7/ydoQRFUqR38Gb0IZL1tOL6PnbCRUcig -Aypy016W/WMCjBfQ8qxIGTaj5agX2t28hbiURbxZkCkz+Z3OWkO0Rq3Y2hNAYM5s -eTn94JIGGwADBgv/dbSZ9LrBvdMwg8pAtdlLtQdjPiT1i9w5NZuQd7OuKhOxYTEB -NRDTgy4/DgeNThCeOkMB/UQQPtJ3Et45S2YRtnnuvfxgnlz7xlUn765/grtnRk4t 
-ONjMmb6tZos1FjIJecB/6h4RsvUd2egvtlpD/Z3YKr6MpNjWg4ji7m27e9pcJfP6 -YpTDrq9GamiHy9FS2F2pZlQxriPpVhjCLVn9tFGBIsXNxxn7SP4so6rJBmyHEAlq -iym9wl933e0FIgAw5C1vvprYu2amk+jmVBsJjjCmInW5q/kWAFnFaHBvk+v+/7tX -hywWUI7BqseikgUlkgJ6eU7E9z1DEyuS08x/cViDoNh2ntVUhpnluDu48pdqBvvY -a4uL/D+KI84THUAJ/vZy+q6G3BEb4hI9pFjgrdJpUKubxyZolmkCFZHjV34uOcTc -LQr28P8xW8vQbg5DpIsivxYLqDGXt3OyiItxvLMtw/ypt6PkoeP9A4KDST4StITE -1hrOrPtJ/VRmS2o0iHgEGBEIACAWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34e -AwIbDAAKCRAyma6w5Ahbr6QWAP9/pl2R6r1nuCnXzewSbnH1OLsXf32hFQAjaQ5o -Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE= -=/4e+ ------END PGP PUBLIC KEY BLOCK----- -` - - keyRingFingerprintFixture = "3299AEB0E4085BAF" - - malformedKeyRingFixture = ` ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8 -mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths -TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ -rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K -Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT ------END PGP PUBLIC KEY BLOCK----- -` -) - -func TestCommit_String(t *testing.T) { - tests := []struct { - name string - commit *Commit - want string - }{ - { - name: "Reference and commit", - commit: &Commit{ - Hash: []byte("commit"), - Reference: "refs/heads/main", - }, - want: "main/commit", - }, - { - name: "Reference with slash and commit", - commit: &Commit{ - Hash: []byte("commit"), - Reference: "refs/heads/feature/branch", - }, - want: "feature/branch/commit", - }, - { - name: "No reference", - commit: &Commit{ - Hash: []byte("commit"), - }, - want: "HEAD/commit", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - g.Expect(tt.commit.String()).To(Equal(tt.want)) - }) - } -} - -func TestCommit_Verify(t *testing.T) { - tests := []struct { - name string - commit *Commit - keyRings []string - want string - wantErr string - }{ - { - name: "Valid commit 
signature", - commit: &Commit{ - Encoded: []byte(encodedCommitFixture), - Signature: signatureCommitFixture, - }, - keyRings: []string{armoredKeyRingFixture}, - want: keyRingFingerprintFixture, - }, - { - name: "Malformed encoded commit", - commit: &Commit{ - Encoded: []byte(malformedEncodedCommitFixture), - Signature: signatureCommitFixture, - }, - keyRings: []string{armoredKeyRingFixture}, - wantErr: "failed to verify commit with any of the given key rings", - }, - { - name: "Malformed key ring", - commit: &Commit{ - Encoded: []byte(encodedCommitFixture), - Signature: signatureCommitFixture, - }, - keyRings: []string{malformedKeyRingFixture}, - wantErr: "failed to read armored key ring: unexpected EOF", - }, - { - name: "Missing signature", - commit: &Commit{ - Encoded: []byte(encodedCommitFixture), - }, - keyRings: []string{armoredKeyRingFixture}, - wantErr: "commit does not have a PGP signature", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - got, err := tt.commit.Verify(tt.keyRings...) 
- if tt.wantErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeEmpty()) - return - } - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) - }) - } -} - -func TestCommit_ShortMessage(t *testing.T) { - tests := []struct { - name string - input string - want string - }{ - { - name: "short message", - input: "a short commit message", - want: "a short commit message", - }, - { - name: "long message", - input: "hello world - a long commit message for testing long messages", - want: "hello world - a long commit message for testing lo...", - }, - { - name: "multi line commit message", - input: `title of the commit - -detailed description -of the commit`, - want: "title of the commit", - }, - { - name: "message with unicodes", - input: "a message with unicode characters 你好世界 🏞️ 🏕️ ⛩️ 🌌", - want: "a message with unicode characters 你好世界 🏞️ 🏕️ ⛩️ 🌌", - }, - { - name: "empty commit message", - input: "", - want: "", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - c := Commit{Message: tt.input} - g.Expect(c.ShortMessage()).To(Equal(tt.want)) - }) - } -} - -func TestIsConcreteCommit(t *testing.T) { - tests := []struct { - name string - commit Commit - result bool - }{ - { - name: "concrete commit", - commit: Commit{ - Hash: Hash("foo"), - Reference: "refs/tags/main", - Author: Signature{ - Name: "user", Email: "user@example.com", When: time.Now(), - }, - Committer: Signature{ - Name: "user", Email: "user@example.com", When: time.Now(), - }, - Signature: "signature", - Encoded: []byte("commit-content"), - Message: "commit-message", - }, - result: true, - }, - { - name: "partial commit", - commit: Commit{Hash: Hash("foo")}, - result: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - g.Expect(IsConcreteCommit(tt.commit)).To(Equal(tt.result)) - }) - } -} diff --git 
a/pkg/git/gogit/checkout.go b/pkg/git/gogit/checkout.go deleted file mode 100644 index c3c484c61..000000000 --- a/pkg/git/gogit/checkout.go +++ /dev/null @@ -1,424 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gogit - -import ( - "context" - "errors" - "fmt" - "io" - "sort" - "strings" - "time" - - "github.com/Masterminds/semver/v3" - extgogit "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/storage/memory" - - "github.com/fluxcd/pkg/gitutil" - "github.com/fluxcd/pkg/version" - - "github.com/fluxcd/source-controller/pkg/git" -) - -// CheckoutStrategyForOptions returns the git.CheckoutStrategy for the given -// git.CheckoutOptions. 
-func CheckoutStrategyForOptions(_ context.Context, opts git.CheckoutOptions) git.CheckoutStrategy { - switch { - case opts.Commit != "": - return &CheckoutCommit{Branch: opts.Branch, Commit: opts.Commit, RecurseSubmodules: opts.RecurseSubmodules} - case opts.SemVer != "": - return &CheckoutSemVer{SemVer: opts.SemVer, RecurseSubmodules: opts.RecurseSubmodules} - case opts.Tag != "": - return &CheckoutTag{Tag: opts.Tag, RecurseSubmodules: opts.RecurseSubmodules, LastRevision: opts.LastRevision} - default: - branch := opts.Branch - if branch == "" { - branch = git.DefaultBranch - } - return &CheckoutBranch{Branch: branch, RecurseSubmodules: opts.RecurseSubmodules, LastRevision: opts.LastRevision} - } -} - -type CheckoutBranch struct { - Branch string - RecurseSubmodules bool - LastRevision string -} - -func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (*git.Commit, error) { - authMethod, err := transportAuth(opts) - if err != nil { - return nil, fmt.Errorf("failed to construct auth method with options: %w", err) - } - - ref := plumbing.NewBranchReferenceName(c.Branch) - // check if previous revision has changed before attempting to clone - if c.LastRevision != "" { - currentRevision, err := getLastRevision(ctx, url, ref, opts, authMethod) - if err != nil { - return nil, err - } - - if currentRevision != "" && currentRevision == c.LastRevision { - // Construct a partial commit with the existing information. - // Split the revision and take the last part as the hash. 
- // Example revision: main/43d7eb9c49cdd49b2494efd481aea1166fc22b67 - var hash git.Hash - ss := strings.Split(currentRevision, "/") - if len(ss) > 1 { - hash = git.Hash(ss[len(ss)-1]) - } else { - hash = git.Hash(ss[0]) - } - c := &git.Commit{ - Hash: hash, - Reference: plumbing.NewBranchReferenceName(c.Branch).String(), - } - return c, nil - } - } - - repo, err := extgogit.PlainCloneContext(ctx, path, false, &extgogit.CloneOptions{ - URL: url, - Auth: authMethod, - RemoteName: git.DefaultOrigin, - ReferenceName: plumbing.NewBranchReferenceName(c.Branch), - SingleBranch: true, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: recurseSubmodules(c.RecurseSubmodules), - Progress: nil, - Tags: extgogit.NoTags, - CABundle: caBundle(opts), - }) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.GoGitError(err)) - } - head, err := repo.Head() - if err != nil { - return nil, fmt.Errorf("failed to resolve HEAD of branch '%s': %w", c.Branch, err) - } - cc, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, fmt.Errorf("failed to resolve commit object for HEAD '%s': %w", head.Hash(), err) - } - return buildCommitWithRef(cc, ref) -} - -func getLastRevision(ctx context.Context, url string, ref plumbing.ReferenceName, opts *git.AuthOptions, authMethod transport.AuthMethod) (string, error) { - config := &config.RemoteConfig{ - Name: git.DefaultOrigin, - URLs: []string{url}, - } - rem := extgogit.NewRemote(memory.NewStorage(), config) - listOpts := &extgogit.ListOptions{ - Auth: authMethod, - } - if opts != nil && opts.CAFile != nil { - listOpts.CABundle = opts.CAFile - } - refs, err := rem.ListContext(ctx, listOpts) - if err != nil { - return "", fmt.Errorf("unable to list remote for '%s': %w", url, err) - } - - currentRevision := filterRefs(refs, ref) - return currentRevision, nil -} - -type CheckoutTag struct { - Tag string - RecurseSubmodules bool - LastRevision string -} - -func (c *CheckoutTag) Checkout(ctx 
context.Context, path, url string, opts *git.AuthOptions) (*git.Commit, error) { - authMethod, err := transportAuth(opts) - if err != nil { - return nil, fmt.Errorf("failed to construct auth method with options: %w", err) - } - ref := plumbing.NewTagReferenceName(c.Tag) - // check if previous revision has changed before attempting to clone - if c.LastRevision != "" { - currentRevision, err := getLastRevision(ctx, url, ref, opts, authMethod) - if err != nil { - return nil, err - } - - if currentRevision != "" && currentRevision == c.LastRevision { - // Construct a partial commit with the existing information. - // Split the revision and take the last part as the hash. - // Example revision: 6.1.4/bf09377bfd5d3bcac1e895fa8ce52dc76695c060 - var hash git.Hash - ss := strings.Split(currentRevision, "/") - if len(ss) > 1 { - hash = git.Hash(ss[len(ss)-1]) - } else { - hash = git.Hash(ss[0]) - } - c := &git.Commit{ - Hash: hash, - Reference: ref.String(), - } - return c, nil - } - } - repo, err := extgogit.PlainCloneContext(ctx, path, false, &extgogit.CloneOptions{ - URL: url, - Auth: authMethod, - RemoteName: git.DefaultOrigin, - ReferenceName: plumbing.NewTagReferenceName(c.Tag), - SingleBranch: true, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: recurseSubmodules(c.RecurseSubmodules), - Progress: nil, - Tags: extgogit.NoTags, - CABundle: caBundle(opts), - }) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.GoGitError(err)) - } - head, err := repo.Head() - if err != nil { - return nil, fmt.Errorf("failed to resolve HEAD of tag '%s': %w", c.Tag, err) - } - cc, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, fmt.Errorf("failed to resolve commit object for HEAD '%s': %w", head.Hash(), err) - } - return buildCommitWithRef(cc, ref) -} - -type CheckoutCommit struct { - Branch string - Commit string - RecurseSubmodules bool -} - -func (c *CheckoutCommit) Checkout(ctx context.Context, path, url string, opts 
*git.AuthOptions) (*git.Commit, error) { - authMethod, err := transportAuth(opts) - if err != nil { - return nil, fmt.Errorf("failed to construct auth method with options: %w", err) - } - cloneOpts := &extgogit.CloneOptions{ - URL: url, - Auth: authMethod, - RemoteName: git.DefaultOrigin, - SingleBranch: false, - NoCheckout: true, - RecurseSubmodules: recurseSubmodules(c.RecurseSubmodules), - Progress: nil, - Tags: extgogit.NoTags, - CABundle: caBundle(opts), - } - if c.Branch != "" { - cloneOpts.SingleBranch = true - cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(c.Branch) - } - repo, err := extgogit.PlainCloneContext(ctx, path, false, cloneOpts) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.GoGitError(err)) - } - w, err := repo.Worktree() - if err != nil { - return nil, fmt.Errorf("failed to open Git worktree: %w", err) - } - cc, err := repo.CommitObject(plumbing.NewHash(c.Commit)) - if err != nil { - return nil, fmt.Errorf("failed to resolve commit object for '%s': %w", c.Commit, err) - } - err = w.Checkout(&extgogit.CheckoutOptions{ - Hash: cc.Hash, - Force: true, - }) - if err != nil { - return nil, fmt.Errorf("failed to checkout commit '%s': %w", c.Commit, err) - } - return buildCommitWithRef(cc, cloneOpts.ReferenceName) -} - -type CheckoutSemVer struct { - SemVer string - RecurseSubmodules bool -} - -func (c *CheckoutSemVer) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (*git.Commit, error) { - verConstraint, err := semver.NewConstraint(c.SemVer) - if err != nil { - return nil, fmt.Errorf("semver parse error: %w", err) - } - - authMethod, err := transportAuth(opts) - if err != nil { - return nil, fmt.Errorf("failed to construct auth method with options: %w", err) - } - - repo, err := extgogit.PlainCloneContext(ctx, path, false, &extgogit.CloneOptions{ - URL: url, - Auth: authMethod, - RemoteName: git.DefaultOrigin, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: 
recurseSubmodules(c.RecurseSubmodules), - Progress: nil, - Tags: extgogit.AllTags, - CABundle: caBundle(opts), - }) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.GoGitError(err)) - } - - repoTags, err := repo.Tags() - if err != nil { - return nil, fmt.Errorf("failed to list tags: %w", err) - } - - tags := make(map[string]string) - tagTimestamps := make(map[string]time.Time) - if err = repoTags.ForEach(func(t *plumbing.Reference) error { - revision := plumbing.Revision(t.Name().String()) - hash, err := repo.ResolveRevision(revision) - if err != nil { - return fmt.Errorf("unable to resolve tag revision: %w", err) - } - commit, err := repo.CommitObject(*hash) - if err != nil { - return fmt.Errorf("unable to resolve commit of a tag revision: %w", err) - } - tagTimestamps[t.Name().Short()] = commit.Committer.When - - tags[t.Name().Short()] = t.Strings()[1] - return nil - }); err != nil { - return nil, err - } - - var matchedVersions semver.Collection - for tag := range tags { - v, err := version.ParseVersion(tag) - if err != nil { - continue - } - if !verConstraint.Check(v) { - continue - } - matchedVersions = append(matchedVersions, v) - } - if len(matchedVersions) == 0 { - return nil, fmt.Errorf("no match found for semver: %s", c.SemVer) - } - - // Sort versions - sort.SliceStable(matchedVersions, func(i, j int) bool { - left := matchedVersions[i] - right := matchedVersions[j] - - if !left.Equal(right) { - return left.LessThan(right) - } - - // Having tag target timestamps at our disposal, we further try to sort - // versions into a chronological order. 
This is especially important for - // versions that differ only by build metadata, because it is not considered - // a part of the comparable version in Semver - return tagTimestamps[left.Original()].Before(tagTimestamps[right.Original()]) - }) - v := matchedVersions[len(matchedVersions)-1] - t := v.Original() - - w, err := repo.Worktree() - if err != nil { - return nil, fmt.Errorf("failed to open Git worktree: %w", err) - } - - ref := plumbing.NewTagReferenceName(t) - err = w.Checkout(&extgogit.CheckoutOptions{ - Branch: ref, - }) - if err != nil { - return nil, fmt.Errorf("failed to checkout tag '%s': %w", t, err) - } - head, err := repo.Head() - if err != nil { - return nil, fmt.Errorf("failed to resolve HEAD of tag '%s': %w", t, err) - } - cc, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, fmt.Errorf("failed to resolve commit object for HEAD '%s': %w", head.Hash(), err) - } - return buildCommitWithRef(cc, ref) -} - -func buildCommitWithRef(c *object.Commit, ref plumbing.ReferenceName) (*git.Commit, error) { - if c == nil { - return nil, errors.New("failed to construct commit: no object") - } - - // Encode commit components excluding signature into SignedData. 
- encoded := &plumbing.MemoryObject{} - if err := c.EncodeWithoutSignature(encoded); err != nil { - return nil, fmt.Errorf("failed to encode commit '%s': %w", c.Hash, err) - } - reader, err := encoded.Reader() - if err != nil { - return nil, fmt.Errorf("failed to encode commit '%s': %w", c.Hash, err) - } - b, err := io.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed to read encoded commit '%s': %w", c.Hash, err) - } - return &git.Commit{ - Hash: []byte(c.Hash.String()), - Reference: ref.String(), - Author: buildSignature(c.Author), - Committer: buildSignature(c.Committer), - Signature: c.PGPSignature, - Encoded: b, - Message: c.Message, - }, nil -} - -func buildSignature(s object.Signature) git.Signature { - return git.Signature{ - Name: s.Name, - Email: s.Email, - When: s.When, - } -} - -func recurseSubmodules(recurse bool) extgogit.SubmoduleRescursivity { - if recurse { - return extgogit.DefaultSubmoduleRecursionDepth - } - return extgogit.NoRecurseSubmodules -} - -func filterRefs(refs []*plumbing.Reference, currentRef plumbing.ReferenceName) string { - for _, ref := range refs { - if ref.Name().String() == currentRef.String() { - return fmt.Sprintf("%s/%s", currentRef.Short(), ref.Hash().String()) - } - } - - return "" -} diff --git a/pkg/git/gogit/checkout_test.go b/pkg/git/gogit/checkout_test.go deleted file mode 100644 index 61f0833c3..000000000 --- a/pkg/git/gogit/checkout_test.go +++ /dev/null @@ -1,895 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gogit - -import ( - "context" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/fluxcd/gitkit" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/ssh" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-billy/v5/osfs" - extgogit "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/filesystem" - . "github.com/onsi/gomega" - - cryptossh "golang.org/x/crypto/ssh" - corev1 "k8s.io/api/core/v1" -) - -const testRepositoryPath = "../testdata/git/repo" - -func TestCheckoutBranch_Checkout(t *testing.T) { - repo, path, err := initRepo(t) - if err != nil { - t.Fatal(err) - } - - firstCommit, err := commitFile(repo, "branch", "init", time.Now()) - if err != nil { - t.Fatal(err) - } - - if err = createBranch(repo, "test"); err != nil { - t.Fatal(err) - } - - secondCommit, err := commitFile(repo, "branch", "second", time.Now()) - if err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - branch string - filesCreated map[string]string - lastRevision string - expectedCommit string - expectedConcreteCommit bool - expectedErr string - }{ - { - name: "Default branch", - branch: "master", - filesCreated: map[string]string{"branch": "init"}, - expectedCommit: firstCommit.String(), - expectedConcreteCommit: true, - }, - { - name: "skip clone if LastRevision hasn't changed", - branch: "master", - filesCreated: map[string]string{"branch": "init"}, - lastRevision: fmt.Sprintf("master/%s", firstCommit.String()), - 
expectedCommit: firstCommit.String(), - expectedConcreteCommit: false, - }, - { - name: "Other branch - revision has changed", - branch: "test", - filesCreated: map[string]string{"branch": "second"}, - lastRevision: fmt.Sprintf("master/%s", firstCommit.String()), - expectedCommit: secondCommit.String(), - expectedConcreteCommit: true, - }, - { - name: "Non existing branch", - branch: "invalid", - expectedErr: "couldn't find remote ref \"refs/heads/invalid\"", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - branch := CheckoutBranch{ - Branch: tt.branch, - LastRevision: tt.lastRevision, - } - tmpDir := t.TempDir() - - cc, err := branch.Checkout(context.TODO(), tmpDir, path, nil) - if tt.expectedErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) - g.Expect(cc).To(BeNil()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.branch + "/" + tt.expectedCommit)) - g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectedConcreteCommit)) - - if tt.expectedConcreteCommit { - for k, v := range tt.filesCreated { - g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v)) - } - } - }) - } -} - -func TestCheckoutTag_Checkout(t *testing.T) { - type testTag struct { - name string - annotated bool - } - - tests := []struct { - name string - tagsInRepo []testTag - checkoutTag string - lastRevTag string - expectConcreteCommit bool - expectErr string - }{ - { - name: "Tag", - tagsInRepo: []testTag{{"tag-1", false}}, - checkoutTag: "tag-1", - expectConcreteCommit: true, - }, - { - name: "Annotated", - tagsInRepo: []testTag{{"annotated", true}}, - checkoutTag: "annotated", - expectConcreteCommit: true, - }, - { - name: "Non existing tag", - // Without this go-git returns error "remote repository is empty". 
- tagsInRepo: []testTag{{"tag-1", false}}, - checkoutTag: "invalid", - expectErr: "couldn't find remote ref \"refs/tags/invalid\"", - }, - { - name: "Skip clone - last revision unchanged", - tagsInRepo: []testTag{{"tag-1", false}}, - checkoutTag: "tag-1", - lastRevTag: "tag-1", - expectConcreteCommit: false, - }, - { - name: "Last revision changed", - tagsInRepo: []testTag{{"tag-1", false}, {"tag-2", false}}, - checkoutTag: "tag-2", - lastRevTag: "tag-1", - expectConcreteCommit: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - repo, path, err := initRepo(t) - if err != nil { - t.Fatal(err) - } - - // Collect tags and their associated commit hash for later - // reference. - tagCommits := map[string]string{} - - // Populate the repo with commits and tags. - if tt.tagsInRepo != nil { - for _, tr := range tt.tagsInRepo { - h, err := commitFile(repo, "tag", tr.name, time.Now()) - if err != nil { - t.Fatal(err) - } - _, err = tag(repo, h, tr.annotated, tr.name, time.Now()) - if err != nil { - t.Fatal(err) - } - tagCommits[tr.name] = h.String() - } - } - - checkoutTag := CheckoutTag{ - Tag: tt.checkoutTag, - } - // If last revision is provided, configure it. - if tt.lastRevTag != "" { - lc := tagCommits[tt.lastRevTag] - checkoutTag.LastRevision = fmt.Sprintf("%s/%s", tt.lastRevTag, lc) - } - - tmpDir := t.TempDir() - - cc, err := checkoutTag.Checkout(context.TODO(), tmpDir, path, nil) - if tt.expectErr != "" { - g.Expect(err).ToNot(BeNil()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectErr)) - g.Expect(cc).To(BeNil()) - return - } - - // Check successful checkout results. - g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectConcreteCommit)) - targetTagHash := tagCommits[tt.checkoutTag] - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.checkoutTag + "/" + targetTagHash)) - - // Check file content only when there's an actual checkout. 
- if tt.lastRevTag != tt.checkoutTag { - g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.checkoutTag)) - } - }) - } -} - -func TestCheckoutCommit_Checkout(t *testing.T) { - repo, path, err := initRepo(t) - if err != nil { - t.Fatal(err) - } - - firstCommit, err := commitFile(repo, "commit", "init", time.Now()) - if err != nil { - t.Fatal(err) - } - if err = createBranch(repo, "other-branch"); err != nil { - t.Fatal(err) - } - secondCommit, err := commitFile(repo, "commit", "second", time.Now()) - if err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - commit string - branch string - expectCommit string - expectFile string - expectError string - }{ - { - name: "Commit", - commit: firstCommit.String(), - expectCommit: "HEAD/" + firstCommit.String(), - expectFile: "init", - }, - { - name: "Commit in specific branch", - commit: secondCommit.String(), - branch: "other-branch", - expectCommit: "other-branch/" + secondCommit.String(), - expectFile: "second", - }, - { - name: "Non existing commit", - commit: "a-random-invalid-commit", - expectError: "failed to resolve commit object for 'a-random-invalid-commit': object not found", - }, - { - name: "Non existing commit in specific branch", - commit: secondCommit.String(), - branch: "master", - expectError: "object not found", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - commit := CheckoutCommit{ - Commit: tt.commit, - Branch: tt.branch, - } - - tmpDir := t.TempDir() - - cc, err := commit.Checkout(context.TODO(), tmpDir, path, nil) - if tt.expectError != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectError)) - g.Expect(cc).To(BeNil()) - return - } - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc).ToNot(BeNil()) - g.Expect(cc.String()).To(Equal(tt.expectCommit)) - g.Expect(filepath.Join(tmpDir, 
"commit")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "commit"))).To(BeEquivalentTo(tt.expectFile)) - }) - } -} - -func TestCheckoutTagSemVer_Checkout(t *testing.T) { - now := time.Now() - - tags := []struct { - tag string - annotated bool - commitTime time.Time - tagTime time.Time - }{ - { - tag: "v0.0.1", - annotated: false, - commitTime: now, - }, - { - tag: "v0.1.0+build-1", - annotated: true, - commitTime: now.Add(10 * time.Minute), - tagTime: now.Add(2 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "v0.1.0+build-2", - annotated: false, - commitTime: now.Add(30 * time.Minute), - }, - { - tag: "v0.1.0+build-3", - annotated: true, - commitTime: now.Add(1 * time.Hour), - tagTime: now.Add(1 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "0.2.0", - annotated: true, - commitTime: now, - tagTime: now, - }, - } - tests := []struct { - name string - constraint string - expectErr error - expectTag string - }{ - { - name: "Orders by SemVer", - constraint: ">0.1.0", - expectTag: "0.2.0", - }, - { - name: "Orders by SemVer and timestamp", - constraint: "<0.2.0", - expectTag: "v0.1.0+build-3", - }, - { - name: "Errors without match", - constraint: ">=1.0.0", - expectErr: errors.New("no match found for semver: >=1.0.0"), - }, - } - - repo, path, err := initRepo(t) - if err != nil { - t.Fatal(err) - } - - refs := make(map[string]string, len(tags)) - for _, tt := range tags { - ref, err := commitFile(repo, "tag", tt.tag, tt.commitTime) - if err != nil { - t.Fatal(err) - } - _, err = tag(repo, ref, tt.annotated, tt.tag, tt.tagTime) - if err != nil { - t.Fatal(err) - } - refs[tt.tag] = ref.String() - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - semVer := CheckoutSemVer{ - SemVer: tt.constraint, - } - tmpDir := t.TempDir() - - cc, err := semVer.Checkout(context.TODO(), tmpDir, path, nil) - if tt.expectErr != nil { - 
g.Expect(err).To(Equal(tt.expectErr)) - g.Expect(cc).To(BeNil()) - return - } - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.expectTag + "/" + refs[tt.expectTag])) - g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.expectTag)) - }) - } -} - -// Test_KeyTypes assures support for the different types of keys -// for SSH Authentication supported by Flux. -func Test_KeyTypes(t *testing.T) { - tests := []struct { - name string - keyType ssh.KeyPairType - authorized bool - wantErr string - }{ - {name: "RSA 4096", keyType: ssh.RSA_4096, authorized: true}, - {name: "ECDSA P256", keyType: ssh.ECDSA_P256, authorized: true}, - {name: "ECDSA P384", keyType: ssh.ECDSA_P384, authorized: true}, - {name: "ECDSA P521", keyType: ssh.ECDSA_P521, authorized: true}, - {name: "ED25519", keyType: ssh.ED25519, authorized: true}, - {name: "unauthorized key", keyType: ssh.RSA_4096, wantErr: "unable to authenticate, attempted methods [none publickey], no supported methods remain"}, - } - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir) - - // Auth needs to be called, for authentication to be enabled. 
- server.Auth("", "") - - var authorizedPublicKey string - server.PublicKeyLookupFunc(func(content string) (*gitkit.PublicKey, error) { - authedKey := strings.TrimSuffix(string(authorizedPublicKey), "\n") - if authedKey == content { - return &gitkit.PublicKey{Content: content}, nil - } - return nil, fmt.Errorf("pubkey provided '%s' does not match %s", content, authedKey) - }) - - g := NewWithT(t) - timeout := 5 * time.Second - - server.KeyDir(filepath.Join(server.Root(), "keys")) - g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - err := server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. - u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) - g.Expect(err).ToNot(HaveOccurred()) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - // Generate ssh keys based on key type. - kp, err := ssh.GenerateKeyPair(tt.keyType) - g.Expect(err).ToNot(HaveOccurred()) - - // Update authorized key to ensure only the new key is valid on the server. - if tt.authorized { - authorizedPublicKey = string(kp.PublicKey) - } - - secret := corev1.Secret{ - Data: map[string][]byte{ - "identity": kp.PrivateKey, - "known_hosts": knownHosts, - }, - } - - authOpts, err := git.AuthOptionsFromSecret(repoURL, &secret) - g.Expect(err).ToNot(HaveOccurred()) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. 
- commit, err := branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - - if tt.wantErr == "" { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(commit).ToNot(BeNil()) - - // Confirm checkout actually happened. - d, err := os.ReadDir(tmpDir) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(d).To(HaveLen(2)) // .git and foo.txt - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).Should(ContainSubstring(tt.wantErr)) - } - }) - } -} - -// Test_KeyExchangeAlgos assures support for the different -// types of SSH key exchange algorithms supported by Flux. -func Test_KeyExchangeAlgos(t *testing.T) { - tests := []struct { - name string - ClientKex []string - ServerKex []string - wantErr string - }{ - { - name: "support for kex: diffie-hellman-group14-sha1", - ClientKex: []string{"diffie-hellman-group14-sha1"}, - ServerKex: []string{"diffie-hellman-group14-sha1"}, - }, - { - name: "support for kex: diffie-hellman-group14-sha256", - ClientKex: []string{"diffie-hellman-group14-sha256"}, - ServerKex: []string{"diffie-hellman-group14-sha256"}, - }, - { - name: "support for kex: curve25519-sha256", - ClientKex: []string{"curve25519-sha256"}, - ServerKex: []string{"curve25519-sha256"}, - }, - { - name: "support for kex: ecdh-sha2-nistp256", - ClientKex: []string{"ecdh-sha2-nistp256"}, - ServerKex: []string{"ecdh-sha2-nistp256"}, - }, - { - name: "support for kex: ecdh-sha2-nistp384", - ClientKex: []string{"ecdh-sha2-nistp384"}, - ServerKex: []string{"ecdh-sha2-nistp384"}, - }, - { - name: "support for kex: ecdh-sha2-nistp521", - ClientKex: []string{"ecdh-sha2-nistp521"}, - ServerKex: []string{"ecdh-sha2-nistp521"}, - }, - { - name: "support for kex: curve25519-sha256@libssh.org", - ClientKex: []string{"curve25519-sha256@libssh.org"}, - ServerKex: []string{"curve25519-sha256@libssh.org"}, - }, - { - name: "non-matching kex", - ClientKex: []string{"ecdh-sha2-nistp521"}, - ServerKex: []string{"curve25519-sha256@libssh.org"}, - wantErr: "ssh: no common 
algorithm for key exchange; client offered: [ecdh-sha2-nistp521 ext-info-c], server offered: [curve25519-sha256@libssh.org]", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - timeout := 5 * time.Second - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir).WithSSHConfig(&cryptossh.ServerConfig{ - Config: cryptossh.Config{ - KeyExchanges: tt.ServerKex, - }, - }) - - // Set what Client Key Exchange Algos to send - git.KexAlgos = tt.ClientKex - - server.KeyDir(filepath.Join(server.Root(), "keys")) - g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - err := server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. - u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) - g.Expect(err).ToNot(HaveOccurred()) - - // No authentication is required for this test, but it is - // used here to make the Checkout logic happy. - kp, err := ssh.GenerateKeyPair(ssh.ED25519) - g.Expect(err).ToNot(HaveOccurred()) - - secret := corev1.Secret{ - Data: map[string][]byte{ - "identity": kp.PrivateKey, - "known_hosts": knownHosts, - }, - } - - authOpts, err := git.AuthOptionsFromSecret(repoURL, &secret) - g.Expect(err).ToNot(HaveOccurred()) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. 
- _, err = branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - if tt.wantErr != "" { - g.Expect(err).Error().Should(HaveOccurred()) - g.Expect(err.Error()).Should(ContainSubstring(tt.wantErr)) - } else { - g.Expect(err).Error().ShouldNot(HaveOccurred()) - } - }) - } -} - -// TestHostKeyAlgos assures support for the different -// types of SSH Host Key algorithms supported by Flux. -func TestHostKeyAlgos(t *testing.T) { - tests := []struct { - name string - keyType ssh.KeyPairType - ClientHostKeyAlgos []string - hashHostNames bool - }{ - { - name: "support for hostkey: ssh-rsa", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"ssh-rsa"}, - }, - { - name: "support for hostkey: rsa-sha2-256", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-256"}, - }, - { - name: "support for hostkey: rsa-sha2-512", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-512"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp256", - keyType: ssh.ECDSA_P256, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp256"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp384", - keyType: ssh.ECDSA_P384, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp384"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp521", - keyType: ssh.ECDSA_P521, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp521"}, - }, - { - name: "support for hostkey: ssh-ed25519", - keyType: ssh.ED25519, - ClientHostKeyAlgos: []string{"ssh-ed25519"}, - }, - { - name: "support for hostkey: ssh-rsa with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"ssh-rsa"}, - hashHostNames: true, - }, - { - name: "support for hostkey: rsa-sha2-256 with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-256"}, - hashHostNames: true, - }, - { - name: "support for hostkey: rsa-sha2-512 with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-512"}, - hashHostNames: true, - }, - { - name: 
"support for hostkey: ecdsa-sha2-nistp256 with hashed host names", - keyType: ssh.ECDSA_P256, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp256"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp384 with hashed host names", - keyType: ssh.ECDSA_P384, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp384"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp521 with hashed host names", - keyType: ssh.ECDSA_P521, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp521"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ssh-ed25519 with hashed host names", - keyType: ssh.ED25519, - ClientHostKeyAlgos: []string{"ssh-ed25519"}, - hashHostNames: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - timeout := 5 * time.Second - - sshConfig := &cryptossh.ServerConfig{} - - // Generate new keypair for the server to use for HostKeys. - hkp, err := ssh.GenerateKeyPair(tt.keyType) - g.Expect(err).NotTo(HaveOccurred()) - p, err := cryptossh.ParseRawPrivateKey(hkp.PrivateKey) - g.Expect(err).NotTo(HaveOccurred()) - - // Add key to server. - signer, err := cryptossh.NewSignerFromKey(p) - g.Expect(err).NotTo(HaveOccurred()) - sshConfig.AddHostKey(signer) - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir).WithSSHConfig(sshConfig) - - // Set what HostKey Algos will be accepted from a client perspective. - git.HostKeyAlgos = tt.ClientHostKeyAlgos - - keyDir := filepath.Join(server.Root(), "keys") - server.KeyDir(keyDir) - g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - err = server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. 
- u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, tt.hashHostNames) - g.Expect(err).ToNot(HaveOccurred()) - - // No authentication is required for this test, but it is - // used here to make the Checkout logic happy. - kp, err := ssh.GenerateKeyPair(ssh.ED25519) - g.Expect(err).ToNot(HaveOccurred()) - - secret := corev1.Secret{ - Data: map[string][]byte{ - "identity": kp.PrivateKey, - "known_hosts": knownHosts, - }, - } - - authOpts, err := git.AuthOptionsFromSecret(repoURL, &secret) - g.Expect(err).ToNot(HaveOccurred()) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. - _, err = branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - g.Expect(err).Error().ShouldNot(HaveOccurred()) - }) - } -} - -func initRepo(t *testing.T) (*extgogit.Repository, string, error) { - tmpDir := t.TempDir() - sto := filesystem.NewStorage(osfs.New(tmpDir), cache.NewObjectLRUDefault()) - repo, err := extgogit.Init(sto, memfs.New()) - if err != nil { - return nil, "", err - } - return repo, tmpDir, err -} - -func createBranch(repo *extgogit.Repository, branch string) error { - wt, err := repo.Worktree() - if err != nil { - return err - } - h, err := repo.Head() - if err != nil { - return err - } - return wt.Checkout(&extgogit.CheckoutOptions{ - Hash: h.Hash(), - Branch: plumbing.ReferenceName("refs/heads/" + branch), - Create: true, - }) -} - -func commitFile(repo *extgogit.Repository, path, content string, time time.Time) (plumbing.Hash, error) { - wt, err := repo.Worktree() - if err != nil { - return plumbing.Hash{}, err - } - f, err := wt.Filesystem.Create(path) - if err != nil { - return plumbing.Hash{}, err - } - if _, err = f.Write([]byte(content)); err != nil { - f.Close() 
- return plumbing.Hash{}, err - } - if err = f.Close(); err != nil { - return plumbing.Hash{}, err - } - if _, err = wt.Add(path); err != nil { - return plumbing.Hash{}, err - } - return wt.Commit("Adding: "+path, &extgogit.CommitOptions{ - Author: mockSignature(time), - Committer: mockSignature(time), - }) -} - -func tag(repo *extgogit.Repository, commit plumbing.Hash, annotated bool, tag string, time time.Time) (*plumbing.Reference, error) { - var opts *extgogit.CreateTagOptions - if annotated { - opts = &extgogit.CreateTagOptions{ - Tagger: mockSignature(time), - Message: "Annotated tag for: " + tag, - } - } - return repo.CreateTag(tag, commit, opts) -} - -func mockSignature(time time.Time) *object.Signature { - return &object.Signature{ - Name: "Jane Doe", - Email: "jane@example.com", - When: time, - } -} diff --git a/pkg/git/gogit/transport.go b/pkg/git/gogit/transport.go deleted file mode 100644 index 977e8f7fd..000000000 --- a/pkg/git/gogit/transport.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gogit - -import ( - "fmt" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/http" - "github.com/go-git/go-git/v5/plumbing/transport/ssh" - - "github.com/fluxcd/pkg/ssh/knownhosts" - - "github.com/fluxcd/source-controller/pkg/git" - - gossh "golang.org/x/crypto/ssh" -) - -// transportAuth constructs the transport.AuthMethod for the git.Transport of -// the given git.AuthOptions. It returns the result, or an error. -func transportAuth(opts *git.AuthOptions) (transport.AuthMethod, error) { - if opts == nil { - return nil, nil - } - switch opts.Transport { - case git.HTTPS, git.HTTP: - // Some providers (i.e. GitLab) will reject empty credentials for - // public repositories. - if opts.Username != "" || opts.Password != "" { - return &http.BasicAuth{ - Username: opts.Username, - Password: opts.Password, - }, nil - } - return nil, nil - case git.SSH: - if len(opts.Identity) > 0 { - pk, err := ssh.NewPublicKeys(opts.Username, opts.Identity, opts.Password) - if err != nil { - return nil, err - } - if len(opts.KnownHosts) > 0 { - callback, err := knownhosts.New(opts.KnownHosts) - if err != nil { - return nil, err - } - pk.HostKeyCallback = callback - } - customPK := &CustomPublicKeys{ - pk: pk, - } - return customPK, nil - } - case "": - return nil, fmt.Errorf("no transport type set") - default: - return nil, fmt.Errorf("unknown transport '%s'", opts.Transport) - } - return nil, nil -} - -// caBundle returns the CA bundle from the given git.AuthOptions. -func caBundle(opts *git.AuthOptions) []byte { - if opts == nil { - return nil - } - return opts.CAFile -} - -// CustomPublicKeys is a wrapper around ssh.PublicKeys to help us -// customize the ssh config. It implements ssh.AuthMethod. 
-type CustomPublicKeys struct { - pk *ssh.PublicKeys -} - -func (a *CustomPublicKeys) Name() string { - return a.pk.Name() -} - -func (a *CustomPublicKeys) String() string { - return a.pk.String() -} - -func (a *CustomPublicKeys) ClientConfig() (*gossh.ClientConfig, error) { - config, err := a.pk.ClientConfig() - if err != nil { - return nil, err - } - if len(git.KexAlgos) > 0 { - config.Config.KeyExchanges = git.KexAlgos - } - if len(git.HostKeyAlgos) > 0 { - config.HostKeyAlgorithms = git.HostKeyAlgos - } - - return config, nil -} diff --git a/pkg/git/gogit/transport_test.go b/pkg/git/gogit/transport_test.go deleted file mode 100644 index 729668190..000000000 --- a/pkg/git/gogit/transport_test.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gogit - -import ( - "errors" - "testing" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/http" - . "github.com/onsi/gomega" - - "github.com/fluxcd/source-controller/pkg/git" -) - -const ( - // privateKeyFixture is a randomly generated password less - // 512bit RSA private key. 
- privateKeyFixture = `-----BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQCrakELAKxozvwJijQEggYlTvS1QTZx1DaBwOhW/4kRSuR21plu -xuQeyuUiztoWeb9jgW7wjzG4j1PIJjdbsgjPIcIZ4PBY7JeEW+QRopfwuN8MHXNp -uTLgIHbkmhoOg5qBEcjzO/lEOOPpV0EmbObgqv3+wRmLJrgfzWl/cTtRewIDAQAB -AoGAawKFImpEN5Xn78iwWpQVZBsbV0AjzgHuGSiloxIZrorzf2DPHkHZzYNaclVx -/o/4tBTsfg7WumH3qr541qyZJDgU7iRMABwmx0v1vm2wQiX7NJzLzH2E9vlMC3mw -d8S99g9EqRuNH98XX8su34B9WGRPqiKvEm0RW8Hideo2/KkCQQDbs6rHcriKQyPB -paidHZAfguu0eVbyHT2EgLgRboWE+tEAqFEW2ycqNL3VPz9fRvwexbB6rpOcPpQJ -DEL4XB2XAkEAx7xJz8YlCQ2H38xggK8R8EUXF9Zhb0fqMJHMNmao1HCHVMtbsa8I -jR2EGyQ4CaIqNG5tdWukXQSJrPYDRWNvvQJAZX3rP7XUYDLB2twvN12HzbbKMhX3 -v2MYnxRjc9INpi/Dyzz2MMvOnOW+aDuOh/If2AtVCmeJUx1pf4CFk3viQwJBAKyC -t824+evjv+NQBlme3AOF6PgxtV4D4wWoJ5Uk/dTejER0j/Hbl6sqPxuiILRRV9qJ -Ngkgu4mLjc3RfenEhJECQAx8zjWUE6kHHPGAd9DfiAIQ4bChqnyS0Nwb9+Gd4hSE -P0Ah10mHiK/M0o3T8Eanwum0gbQHPnOwqZgsPkwXRqQ= ------END RSA PRIVATE KEY-----` - - // privateKeyPassphraseFixture is a randomly generated - // 512bit RSA private key with password foobar. - privateKeyPassphraseFixture = `-----BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: AES-256-CBC,0B016973B2A761D31E6B388D0F327C35 - -X9GET/qAyZkAJBl/RK+1XX75NxONgdUfZDw7PIYi/g+Efh3Z5zH5kh/dx9lxH5ZG -HGCqPAeMO/ofGDGtDULWW6iqDUFRu5gPgEVSCnnbqoHNU325WHhXdhejVAItwObC -IpL/zYfs2+gDHXct/n9FJ/9D/EGXZihwPqYaK8GQSfZAxz0QjLuh0wU1qpbm3y3N -q+o9FLv3b2Ys/tCJOUsYVQOYLSrZEI77y1ii3nWgQ8lXiTJbBUKzuq4f1YWeO8Ah -RZbdhTa57AF5lUaRtL7Nrm3HJUrK1alBbU7HHyjeW4Q4n/D3fiRDC1Mh2Bi4EOOn -wGctSx4kHsZGhJv5qwKqqPEFPhUzph8D2tm2TABk8HJa5KJFDbGrcfvk2uODAoZr -MbcpIxCfl8oB09bWfY6tDQjyvwSYYo2Phdwm7kT92xc= ------END RSA PRIVATE KEY-----` - - // knownHostsFixture is known_hosts fixture in the expected - // format. 
- knownHostsFixture string = `github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==` -) - -func Test_transportAuth(t *testing.T) { - tests := []struct { - name string - opts *git.AuthOptions - wantFunc func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) - kexAlgos []string - wantErr error - }{ - { - name: "Public HTTP Repositories", - opts: &git.AuthOptions{ - Transport: git.HTTP, - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - g.Expect(t).To(BeNil()) - }, - }, - { - name: "Public HTTPS Repositories", - opts: &git.AuthOptions{ - Transport: git.HTTP, - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - g.Expect(t).To(BeNil()) - }, - }, - { - name: "HTTP basic auth", - opts: &git.AuthOptions{ - Transport: git.HTTP, - Username: "example", - Password: "password", - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - g.Expect(t).To(Equal(&http.BasicAuth{ - Username: opts.Username, - Password: opts.Password, - })) - }, - }, - { - name: "HTTPS basic auth", - opts: &git.AuthOptions{ - Transport: git.HTTPS, - Username: "example", - Password: "password", - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - g.Expect(t).To(Equal(&http.BasicAuth{ - Username: opts.Username, - Password: opts.Password, - })) - }, - }, - { - name: "SSH private key", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Identity: []byte(privateKeyFixture), - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - tt, ok := t.(*CustomPublicKeys) - g.Expect(ok).To(BeTrue()) - 
g.Expect(tt.pk.User).To(Equal(opts.Username)) - g.Expect(tt.pk.Signer.PublicKey().Type()).To(Equal("ssh-rsa")) - }, - }, - { - name: "SSH private key with passphrase", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Password: "foobar", - Identity: []byte(privateKeyPassphraseFixture), - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - tt, ok := t.(*CustomPublicKeys) - g.Expect(ok).To(BeTrue()) - g.Expect(tt.pk.User).To(Equal(opts.Username)) - g.Expect(tt.pk.Signer.PublicKey().Type()).To(Equal("ssh-rsa")) - }, - }, - { - name: "SSH with custom key exchanges", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Identity: []byte(privateKeyFixture), - KnownHosts: []byte(knownHostsFixture), - }, - kexAlgos: []string{"curve25519-sha256", "diffie-hellman-group-exchange-sha256"}, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - tt, ok := t.(*CustomPublicKeys) - g.Expect(ok).To(BeTrue()) - g.Expect(tt.pk.User).To(Equal(opts.Username)) - g.Expect(tt.pk.Signer.PublicKey().Type()).To(Equal("ssh-rsa")) - config, err := tt.ClientConfig() - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(config.Config.KeyExchanges).To(Equal( - []string{"curve25519-sha256", "diffie-hellman-group-exchange-sha256"}), - ) - }, - }, - { - name: "SSH private key with invalid passphrase", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Password: "", - Identity: []byte(privateKeyPassphraseFixture), - }, - wantErr: errors.New("x509: decryption password incorrect"), - }, - { - name: "SSH private key with known_hosts", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Identity: []byte(privateKeyFixture), - KnownHosts: []byte(knownHostsFixture), - }, - wantFunc: func(g *WithT, t transport.AuthMethod, opts *git.AuthOptions) { - tt, ok := t.(*CustomPublicKeys) - g.Expect(ok).To(BeTrue()) - g.Expect(tt.pk.User).To(Equal(opts.Username)) - 
g.Expect(tt.pk.Signer.PublicKey().Type()).To(Equal("ssh-rsa")) - g.Expect(tt.pk.HostKeyCallback).ToNot(BeNil()) - }, - }, - { - name: "SSH private key with invalid known_hosts", - opts: &git.AuthOptions{ - Transport: git.SSH, - Username: "example", - Identity: []byte(privateKeyFixture), - KnownHosts: []byte("invalid"), - }, - wantErr: errors.New("knownhosts: knownhosts: missing host pattern"), - }, - { - name: "Empty", - opts: &git.AuthOptions{}, - wantErr: errors.New("no transport type set"), - }, - { - name: "Unknown transport", - opts: &git.AuthOptions{ - Transport: "foo", - }, - wantErr: errors.New("unknown transport 'foo'"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - if len(tt.kexAlgos) > 0 { - git.KexAlgos = tt.kexAlgos - } - - got, err := transportAuth(tt.opts) - if tt.wantErr != nil { - g.Expect(err).To(Equal(tt.wantErr)) - g.Expect(got).To(BeNil()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - if tt.wantFunc != nil { - tt.wantFunc(g, got, tt.opts) - } - }) - } -} - -func Test_caBundle(t *testing.T) { - g := NewWithT(t) - - g.Expect(caBundle(&git.AuthOptions{CAFile: []byte("foo")})).To(BeEquivalentTo("foo")) - g.Expect(caBundle(nil)).To(BeNil()) -} diff --git a/pkg/git/libgit2/checkout.go b/pkg/git/libgit2/checkout.go deleted file mode 100644 index 21b54f394..000000000 --- a/pkg/git/libgit2/checkout.go +++ /dev/null @@ -1,566 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package libgit2 - -import ( - "context" - "errors" - "fmt" - "sort" - "strings" - "time" - - "github.com/Masterminds/semver/v3" - "github.com/go-logr/logr" - git2go "github.com/libgit2/git2go/v33" - - "github.com/fluxcd/pkg/gitutil" - "github.com/fluxcd/pkg/version" - - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" -) - -const defaultRemoteName = "origin" - -// CheckoutStrategyForOptions returns the git.CheckoutStrategy for the given -// git.CheckoutOptions. -func CheckoutStrategyForOptions(ctx context.Context, opt git.CheckoutOptions) git.CheckoutStrategy { - if opt.RecurseSubmodules { - logr.FromContextOrDiscard(ctx).Info(fmt.Sprintf("git submodule recursion not supported by implementation '%s'", Implementation)) - } - switch { - case opt.Commit != "": - return &CheckoutCommit{Commit: opt.Commit} - case opt.SemVer != "": - return &CheckoutSemVer{SemVer: opt.SemVer} - case opt.Tag != "": - return &CheckoutTag{ - Tag: opt.Tag, - LastRevision: opt.LastRevision, - } - default: - branch := opt.Branch - if branch == "" { - branch = git.DefaultBranch - } - return &CheckoutBranch{ - Branch: branch, - LastRevision: opt.LastRevision, - } - } -} - -type CheckoutBranch struct { - Branch string - LastRevision string -} - -func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) { - defer recoverPanic(&err) - - err = registerManagedTransportOptions(ctx, url, opts) - if err != nil { - return nil, err - } - transportOptsURL := opts.TransportOptionsURL - remoteCallBacks := managed.RemoteCallbacks() - defer managed.RemoveTransportOptions(transportOptsURL) - - repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts) - if err != nil { - return nil, err - } - // Open remote connection. 
- err = remote.ConnectFetch(&remoteCallBacks, nil, nil) - if err != nil { - remote.Free() - repo.Free() - return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", url, gitutil.LibGit2Error(err)) - } - defer func() { - remote.Disconnect() - remote.Free() - repo.Free() - }() - - // When the last observed revision is set, check whether it is still the - // same at the remote branch. If so, short-circuit the clone operation here. - if c.LastRevision != "" { - heads, err := remote.Ls(c.Branch) - if err != nil { - return nil, fmt.Errorf("unable to remote ls for '%s': %w", url, gitutil.LibGit2Error(err)) - } - if len(heads) > 0 { - hash := heads[0].Id.String() - currentRevision := fmt.Sprintf("%s/%s", c.Branch, hash) - if currentRevision == c.LastRevision { - // Construct a partial commit with the existing information. - c := &git.Commit{ - Hash: git.Hash(hash), - Reference: "refs/heads/" + c.Branch, - } - return c, nil - } - } - } - - // Limit the fetch operation to the specific branch, to decrease network usage. - err = remote.Fetch([]string{c.Branch}, - &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsNone, - RemoteCallbacks: remoteCallBacks, - }, - "") - if err != nil { - return nil, fmt.Errorf("unable to fetch remote '%s': %w", url, gitutil.LibGit2Error(err)) - } - - branch, err := repo.References.Lookup(fmt.Sprintf("refs/remotes/origin/%s", c.Branch)) - if err != nil { - return nil, fmt.Errorf("unable to lookup branch '%s' for '%s': %w", c.Branch, url, gitutil.LibGit2Error(err)) - } - defer branch.Free() - - upstreamCommit, err := repo.LookupCommit(branch.Target()) - if err != nil { - return nil, fmt.Errorf("unable to lookup commit '%s' for '%s': %w", c.Branch, url, gitutil.LibGit2Error(err)) - } - defer upstreamCommit.Free() - - // We try to lookup the branch (and create it if it doesn't exist), so that we can - // switch the repo to the specified branch. 
This is done so that users of this api - // can expect the repo to be at the desired branch, when cloned. - localBranch, err := repo.LookupBranch(c.Branch, git2go.BranchLocal) - if git2go.IsErrorCode(err, git2go.ErrorCodeNotFound) { - localBranch, err = repo.CreateBranch(c.Branch, upstreamCommit, false) - if err != nil { - return nil, fmt.Errorf("unable to create local branch '%s': %w", c.Branch, err) - } - } else if err != nil { - return nil, fmt.Errorf("unable to lookup branch '%s': %w", c.Branch, err) - } - defer localBranch.Free() - - tree, err := repo.LookupTree(upstreamCommit.TreeId()) - if err != nil { - return nil, fmt.Errorf("unable to lookup tree for branch '%s': %w", c.Branch, err) - } - defer tree.Free() - - err = repo.CheckoutTree(tree, &git2go.CheckoutOpts{ - // the remote branch should take precedence if it exists at this point in time. - Strategy: git2go.CheckoutForce, - }) - if err != nil { - return nil, fmt.Errorf("unable to checkout tree for branch '%s': %w", c.Branch, err) - } - - // Set the current head to point to the requested branch. - err = repo.SetHead("refs/heads/" + c.Branch) - if err != nil { - return nil, fmt.Errorf("unable to set HEAD to branch '%s':%w", c.Branch, err) - } - - // Use the current worktree's head as reference for the commit to be returned. 
- head, err := repo.Head() - if err != nil { - return nil, fmt.Errorf("unable to resolve HEAD: %w", err) - } - defer head.Free() - - cc, err := repo.LookupCommit(head.Target()) - if err != nil { - return nil, fmt.Errorf("unable to lookup HEAD commit '%s' for branch '%s': %w", head.Target(), c.Branch, err) - } - defer cc.Free() - - return buildCommit(cc, "refs/heads/"+c.Branch), nil -} - -type CheckoutTag struct { - Tag string - LastRevision string -} - -func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) { - defer recoverPanic(&err) - - err = registerManagedTransportOptions(ctx, url, opts) - if err != nil { - return nil, err - } - transportOptsURL := opts.TransportOptionsURL - remoteCallBacks := managed.RemoteCallbacks() - defer managed.RemoveTransportOptions(transportOptsURL) - - repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts) - if err != nil { - return nil, err - } - // Open remote connection. - err = remote.ConnectFetch(&remoteCallBacks, nil, nil) - if err != nil { - remote.Free() - repo.Free() - return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", url, gitutil.LibGit2Error(err)) - } - defer func() { - remote.Disconnect() - remote.Free() - repo.Free() - }() - - // When the last observed revision is set, check whether it is still the - // same at the remote branch. If so, short-circuit the clone operation here. 
- if c.LastRevision != "" { - heads, err := remote.Ls(c.Tag) - if err != nil { - return nil, fmt.Errorf("unable to remote ls for '%s': %w", url, gitutil.LibGit2Error(err)) - } - if len(heads) > 0 { - hash := heads[0].Id.String() - currentRevision := fmt.Sprintf("%s/%s", c.Tag, hash) - var same bool - if currentRevision == c.LastRevision { - same = true - } else if len(heads) > 1 { - hash = heads[1].Id.String() - currentAnnotatedRevision := fmt.Sprintf("%s/%s", c.Tag, hash) - if currentAnnotatedRevision == c.LastRevision { - same = true - } - } - if same { - // Construct a partial commit with the existing information. - c := &git.Commit{ - Hash: git.Hash(hash), - Reference: "refs/tags/" + c.Tag, - } - return c, nil - } - } - } - - err = remote.Fetch([]string{c.Tag}, - &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsAuto, - RemoteCallbacks: remoteCallBacks, - }, - "") - - if err != nil { - return nil, fmt.Errorf("unable to fetch remote '%s': %w", url, gitutil.LibGit2Error(err)) - } - - cc, err := checkoutDetachedDwim(repo, c.Tag) - if err != nil { - return nil, err - } - defer cc.Free() - return buildCommit(cc, "refs/tags/"+c.Tag), nil -} - -type CheckoutCommit struct { - Commit string -} - -func (c *CheckoutCommit) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) { - defer recoverPanic(&err) - - err = registerManagedTransportOptions(ctx, url, opts) - if err != nil { - return nil, err - } - transportOptsURL := opts.TransportOptionsURL - defer managed.RemoveTransportOptions(transportOptsURL) - - repo, err := git2go.Clone(transportOptsURL, path, &git2go.CloneOptions{ - FetchOptions: git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsNone, - RemoteCallbacks: managed.RemoteCallbacks(), - }, - }) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.LibGit2Error(err)) - } - defer repo.Free() - oid, err := git2go.NewOid(c.Commit) - if err != nil { - return nil, fmt.Errorf("could 
not create oid for '%s': %w", c.Commit, err) - } - cc, err := checkoutDetachedHEAD(repo, oid) - if err != nil { - return nil, fmt.Errorf("git checkout error: %w", err) - } - return buildCommit(cc, ""), nil -} - -type CheckoutSemVer struct { - SemVer string -} - -func (c *CheckoutSemVer) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) { - defer recoverPanic(&err) - - err = registerManagedTransportOptions(ctx, url, opts) - if err != nil { - return nil, err - } - transportOptsURL := opts.TransportOptionsURL - defer managed.RemoveTransportOptions(transportOptsURL) - - verConstraint, err := semver.NewConstraint(c.SemVer) - if err != nil { - return nil, fmt.Errorf("semver parse error: %w", err) - } - - repo, err := git2go.Clone(transportOptsURL, path, &git2go.CloneOptions{ - FetchOptions: git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsAll, - RemoteCallbacks: managed.RemoteCallbacks(), - }, - }) - if err != nil { - return nil, fmt.Errorf("unable to clone '%s': %w", url, gitutil.LibGit2Error(err)) - } - defer repo.Free() - - tags := make(map[string]string) - tagTimestamps := make(map[string]time.Time) - if err := repo.Tags.Foreach(func(name string, id *git2go.Oid) error { - cleanName := strings.TrimPrefix(name, "refs/tags/") - // The given ID can refer to both a commit and a tag, as annotated tags contain additional metadata. - // Due to this, first attempt to resolve it as a simple tag (commit), but fallback to attempting to - // resolve it as an annotated tag in case this results in an error. - if c, err := repo.LookupCommit(id); err == nil { - defer c.Free() - // Use the commit metadata as the decisive timestamp. 
- tagTimestamps[cleanName] = c.Committer().When - tags[cleanName] = name - return nil - } - t, err := repo.LookupTag(id) - if err != nil { - return fmt.Errorf("could not lookup '%s' as simple or annotated tag: %w", cleanName, err) - } - defer t.Free() - commit, err := t.Peel(git2go.ObjectCommit) - if err != nil { - return fmt.Errorf("could not get commit for tag '%s': %w", t.Name(), err) - } - defer commit.Free() - c, err := commit.AsCommit() - if err != nil { - return fmt.Errorf("could not get commit object for tag '%s': %w", t.Name(), err) - } - defer c.Free() - tagTimestamps[t.Name()] = c.Committer().When - tags[t.Name()] = name - return nil - }); err != nil { - return nil, err - } - - var matchedVersions semver.Collection - for tag := range tags { - v, err := version.ParseVersion(tag) - if err != nil { - continue - } - if !verConstraint.Check(v) { - continue - } - matchedVersions = append(matchedVersions, v) - } - if len(matchedVersions) == 0 { - return nil, fmt.Errorf("no match found for semver: %s", c.SemVer) - } - - // Sort versions - sort.SliceStable(matchedVersions, func(i, j int) bool { - left := matchedVersions[i] - right := matchedVersions[j] - - if !left.Equal(right) { - return left.LessThan(right) - } - - // Having tag target timestamps at our disposal, we further try to sort - // versions into a chronological order. This is especially important for - // versions that differ only by build metadata, because it is not considered - // a part of the comparable version in Semver - return tagTimestamps[left.Original()].Before(tagTimestamps[right.Original()]) - }) - v := matchedVersions[len(matchedVersions)-1] - t := v.Original() - - cc, err := checkoutDetachedDwim(repo, t) - if err != nil { - return nil, err - } - defer cc.Free() - return buildCommit(cc, "refs/tags/"+t), nil -} - -// checkoutDetachedDwim attempts to perform a detached HEAD checkout by first DWIMing the short name -// to get a concrete reference, and then calling checkoutDetachedHEAD. 
-func checkoutDetachedDwim(repo *git2go.Repository, name string) (*git2go.Commit, error) { - ref, err := repo.References.Dwim(name) - if err != nil { - return nil, fmt.Errorf("unable to find '%s': %w", name, err) - } - defer ref.Free() - c, err := ref.Peel(git2go.ObjectCommit) - if err != nil { - return nil, fmt.Errorf("could not get commit for ref '%s': %w", ref.Name(), err) - } - defer c.Free() - cc, err := c.AsCommit() - if err != nil { - return nil, fmt.Errorf("could not get commit object for ref '%s': %w", ref.Name(), err) - } - defer cc.Free() - return checkoutDetachedHEAD(repo, cc.Id()) -} - -// checkoutDetachedHEAD attempts to perform a detached HEAD checkout for the given commit. -func checkoutDetachedHEAD(repo *git2go.Repository, oid *git2go.Oid) (*git2go.Commit, error) { - cc, err := repo.LookupCommit(oid) - if err != nil { - return nil, fmt.Errorf("git commit '%s' not found: %w", oid.String(), err) - } - if err = repo.SetHeadDetached(cc.Id()); err != nil { - cc.Free() - return nil, fmt.Errorf("could not detach HEAD at '%s': %w", oid.String(), err) - } - if err = repo.CheckoutHead(&git2go.CheckoutOptions{ - Strategy: git2go.CheckoutForce, - }); err != nil { - cc.Free() - return nil, fmt.Errorf("git checkout error: %w", err) - } - return cc, nil -} - -// headCommit returns the current HEAD of the repository, or an error. 
-func headCommit(repo *git2go.Repository) (*git2go.Commit, error) { - head, err := repo.Head() - if err != nil { - return nil, err - } - defer head.Free() - c, err := repo.LookupCommit(head.Target()) - if err != nil { - return nil, err - } - return c, nil -} - -func buildCommit(c *git2go.Commit, ref string) *git.Commit { - sig, msg, _ := c.ExtractSignature() - return &git.Commit{ - Hash: []byte(c.Id().String()), - Reference: ref, - Author: buildSignature(c.Author()), - Committer: buildSignature(c.Committer()), - Signature: sig, - Encoded: []byte(msg), - Message: c.Message(), - } -} - -func buildSignature(s *git2go.Signature) git.Signature { - return git.Signature{ - Name: s.Name, - Email: s.Email, - When: s.When, - } -} - -// initializeRepoWithRemote initializes or opens a repository at the given path -// and configures it with the given transport opts URL (as a placeholder for the -// actual target url). If a remote already exists with a different URL, it overwrites -// it with the provided transport opts URL. -func initializeRepoWithRemote(ctx context.Context, path, url string, opts *git.AuthOptions) (*git2go.Repository, *git2go.Remote, error) { - repo, err := git2go.InitRepository(path, false) - if err != nil { - return nil, nil, fmt.Errorf("unable to init repository for '%s': %w", url, gitutil.LibGit2Error(err)) - } - - transportOptsURL := opts.TransportOptionsURL - remote, err := repo.Remotes.Create(defaultRemoteName, transportOptsURL) - if err != nil { - // If the remote already exists, lookup the remote. 
- if git2go.IsErrorCode(err, git2go.ErrorCodeExists) { - remote, err = repo.Remotes.Lookup(defaultRemoteName) - if err != nil { - repo.Free() - return nil, nil, fmt.Errorf("unable to create or lookup remote '%s'", defaultRemoteName) - } - - if remote.Url() != transportOptsURL { - err = repo.Remotes.SetUrl("origin", transportOptsURL) - if err != nil { - repo.Free() - remote.Free() - return nil, nil, fmt.Errorf("unable to configure remote %s origin with url %s", defaultRemoteName, url) - } - - // refresh the remote - remote, err = repo.Remotes.Lookup(defaultRemoteName) - if err != nil { - repo.Free() - return nil, nil, fmt.Errorf("unable to create or lookup remote '%s'", defaultRemoteName) - } - } - } else { - repo.Free() - return nil, nil, fmt.Errorf("unable to create remote for '%s': %w", url, gitutil.LibGit2Error(err)) - } - } - return repo, remote, nil -} - -// registerManagedTransportOptions registers the given url and it's transport options. -// Callers must make sure to call `managed.RemoveTransportOptions()` to avoid increase in -// memory consumption. -// We store the target URL, auth options, etc. mapped to TransporOptsURL because managed transports -// don't provide a way for any kind of dependency injection. -// This lets us have a way of doing interop between application level code and transport level code -// which enables us to fetch the required credentials, context, etc. at the transport level. 
-func registerManagedTransportOptions(ctx context.Context, url string, authOpts *git.AuthOptions) error { - if authOpts == nil { - return errors.New("can't checkout using libgit2 with an empty set of auth options") - } - if authOpts.TransportOptionsURL == "" { - return errors.New("can't checkout using libgit2 without a valid transport auth id") - } - managed.AddTransportOptions(authOpts.TransportOptionsURL, managed.TransportOptions{ - TargetURL: url, - AuthOpts: authOpts, - ProxyOptions: &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}, - Context: ctx, - }) - return nil -} - -func recoverPanic(err *error) { - if r := recover(); r != nil { - *err = fmt.Errorf("recovered from git2go panic: %v", r) - } -} diff --git a/pkg/git/libgit2/checkout_ssh_test.go b/pkg/git/libgit2/checkout_ssh_test.go deleted file mode 100644 index 3eb5b34c0..000000000 --- a/pkg/git/libgit2/checkout_ssh_test.go +++ /dev/null @@ -1,449 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package libgit2 - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/fluxcd/gitkit" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/ssh" - - . 
"github.com/onsi/gomega" - cryptossh "golang.org/x/crypto/ssh" - - "github.com/fluxcd/source-controller/pkg/git" -) - -const testRepositoryPath = "../testdata/git/repo" - -// Test_ssh_keyTypes assures support for the different -// types of keys for SSH Authentication supported by Flux. -func Test_ssh_keyTypes(t *testing.T) { - tests := []struct { - name string - keyType ssh.KeyPairType - authorized bool - wantErr string - }{ - { - name: "RSA 4096", - keyType: ssh.RSA_4096, - authorized: true, - }, - { - name: "ECDSA P256", - keyType: ssh.ECDSA_P256, - authorized: true, - }, - { - name: "ECDSA P384", - keyType: ssh.ECDSA_P384, - authorized: true, - }, - { - name: "ECDSA P521", - keyType: ssh.ECDSA_P521, - authorized: true, - }, - { - name: "ED25519", - keyType: ssh.ED25519, - authorized: true, - }, - { - name: "unauthorized key", - keyType: ssh.RSA_4096, - wantErr: "unable to authenticate, attempted methods [none publickey], no supported methods remain", - }, - } - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir) - - // Auth needs to be called, for authentication to be enabled. - server.Auth("", "") - - var authorizedPublicKey string - server.PublicKeyLookupFunc(func(content string) (*gitkit.PublicKey, error) { - authedKey := strings.TrimSuffix(string(authorizedPublicKey), "\n") - if authedKey == content { - return &gitkit.PublicKey{Content: content}, nil - } - return nil, fmt.Errorf("pubkey provided '%s' does not match %s", content, authedKey) - }) - - g := NewWithT(t) - timeout := 5 * time.Second - - server.KeyDir(filepath.Join(server.Root(), "keys")) - g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - err := server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. 
- u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) - g.Expect(err).ToNot(HaveOccurred()) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - // Generate ssh keys based on key type. - kp, err := ssh.GenerateKeyPair(tt.keyType) - g.Expect(err).ToNot(HaveOccurred()) - - // Update authorized key to ensure only the new key is valid on the server. - if tt.authorized { - authorizedPublicKey = string(kp.PublicKey) - } - - authOpts := &git.AuthOptions{ - Identity: kp.PrivateKey, - KnownHosts: knownHosts, - } - authOpts.TransportOptionsURL = getTransportOptionsURL(git.SSH) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. - commit, err := branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - - if tt.wantErr == "" { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(commit).ToNot(BeNil()) - - // Confirm checkout actually happened. - d, err := os.ReadDir(tmpDir) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(d).To(HaveLen(2)) // .git and foo.txt - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).Should(ContainSubstring(tt.wantErr)) - } - }) - } -} - -// Test_ssh_keyExchangeAlgos assures support for the different -// types of SSH key exchange algorithms supported by Flux. 
-func Test_ssh_keyExchangeAlgos(t *testing.T) { - tests := []struct { - name string - ClientKex []string - ServerKex []string - wantErr string - }{ - { - name: "support for kex: diffie-hellman-group14-sha1", - ClientKex: []string{"diffie-hellman-group14-sha1"}, - ServerKex: []string{"diffie-hellman-group14-sha1"}, - }, - { - name: "support for kex: diffie-hellman-group14-sha256", - ClientKex: []string{"diffie-hellman-group14-sha256"}, - ServerKex: []string{"diffie-hellman-group14-sha256"}, - }, - { - name: "support for kex: curve25519-sha256", - ClientKex: []string{"curve25519-sha256"}, - ServerKex: []string{"curve25519-sha256"}, - }, - { - name: "support for kex: ecdh-sha2-nistp256", - ClientKex: []string{"ecdh-sha2-nistp256"}, - ServerKex: []string{"ecdh-sha2-nistp256"}, - }, - { - name: "support for kex: ecdh-sha2-nistp384", - ClientKex: []string{"ecdh-sha2-nistp384"}, - ServerKex: []string{"ecdh-sha2-nistp384"}, - }, - { - name: "support for kex: ecdh-sha2-nistp521", - ClientKex: []string{"ecdh-sha2-nistp521"}, - ServerKex: []string{"ecdh-sha2-nistp521"}, - }, - { - name: "support for kex: curve25519-sha256@libssh.org", - ClientKex: []string{"curve25519-sha256@libssh.org"}, - ServerKex: []string{"curve25519-sha256@libssh.org"}, - }, - { - name: "non-matching kex", - ClientKex: []string{"ecdh-sha2-nistp521"}, - ServerKex: []string{"curve25519-sha256@libssh.org"}, - wantErr: "ssh: no common algorithm for key exchange; client offered: [ecdh-sha2-nistp521 ext-info-c], server offered: [curve25519-sha256@libssh.org]", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - timeout := 5 * time.Second - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir).WithSSHConfig(&cryptossh.ServerConfig{ - Config: cryptossh.Config{ - KeyExchanges: tt.ServerKex, - }, - }) - - // Set what Client Key Exchange Algos to send - git.KexAlgos = tt.ClientKex - - server.KeyDir(filepath.Join(server.Root(), "keys")) - 
g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - - err := server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. - u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) - g.Expect(err).ToNot(HaveOccurred()) - - // No authentication is required for this test, but it is - // used here to make the Checkout logic happy. - kp, err := ssh.GenerateKeyPair(ssh.ED25519) - g.Expect(err).ToNot(HaveOccurred()) - - authOpts := &git.AuthOptions{ - Identity: kp.PrivateKey, - KnownHosts: knownHosts, - } - authOpts.TransportOptionsURL = getTransportOptionsURL(git.SSH) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. - _, err = branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - if tt.wantErr != "" { - g.Expect(err).Error().Should(HaveOccurred()) - g.Expect(err.Error()).Should(ContainSubstring(tt.wantErr)) - } else { - g.Expect(err).Error().ShouldNot(HaveOccurred()) - } - }) - } -} - -// Test_ssh_hostKeyAlgos assures support for the different -// types of SSH Host Key algorithms supported by Flux. 
-func Test_ssh_hostKeyAlgos(t *testing.T) { - tests := []struct { - name string - keyType ssh.KeyPairType - ClientHostKeyAlgos []string - hashHostNames bool - }{ - { - name: "support for hostkey: ssh-rsa", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"ssh-rsa"}, - }, - { - name: "support for hostkey: rsa-sha2-256", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-256"}, - }, - { - name: "support for hostkey: rsa-sha2-512", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-512"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp256", - keyType: ssh.ECDSA_P256, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp256"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp384", - keyType: ssh.ECDSA_P384, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp384"}, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp521", - keyType: ssh.ECDSA_P521, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp521"}, - }, - { - name: "support for hostkey: ssh-ed25519", - keyType: ssh.ED25519, - ClientHostKeyAlgos: []string{"ssh-ed25519"}, - }, - { - name: "support for hostkey: ssh-rsa with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"ssh-rsa"}, - hashHostNames: true, - }, - { - name: "support for hostkey: rsa-sha2-256 with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-256"}, - hashHostNames: true, - }, - { - name: "support for hostkey: rsa-sha2-512 with hashed host names", - keyType: ssh.RSA_4096, - ClientHostKeyAlgos: []string{"rsa-sha2-512"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp256 with hashed host names", - keyType: ssh.ECDSA_P256, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp256"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ecdsa-sha2-nistp384 with hashed host names", - keyType: ssh.ECDSA_P384, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp384"}, - hashHostNames: true, - }, - { - name: "support for 
hostkey: ecdsa-sha2-nistp521 with hashed host names", - keyType: ssh.ECDSA_P521, - ClientHostKeyAlgos: []string{"ecdsa-sha2-nistp521"}, - hashHostNames: true, - }, - { - name: "support for hostkey: ssh-ed25519 with hashed host names", - keyType: ssh.ED25519, - ClientHostKeyAlgos: []string{"ssh-ed25519"}, - hashHostNames: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - timeout := 5 * time.Second - - sshConfig := &cryptossh.ServerConfig{} - - // Generate new keypair for the server to use for HostKeys. - hkp, err := ssh.GenerateKeyPair(tt.keyType) - g.Expect(err).NotTo(HaveOccurred()) - p, err := cryptossh.ParseRawPrivateKey(hkp.PrivateKey) - g.Expect(err).NotTo(HaveOccurred()) - - // Add key to server. - signer, err := cryptossh.NewSignerFromKey(p) - g.Expect(err).NotTo(HaveOccurred()) - sshConfig.AddHostKey(signer) - - serverRootDir := t.TempDir() - server := gittestserver.NewGitServer(serverRootDir).WithSSHConfig(sshConfig) - - // Set what HostKey Algos will be accepted from a client perspective. - git.HostKeyAlgos = tt.ClientHostKeyAlgos - - keyDir := filepath.Join(server.Root(), "keys") - server.KeyDir(keyDir) - g.Expect(server.ListenSSH()).To(Succeed()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - repoPath := "test.git" - - err = server.InitRepo(testRepositoryPath, git.DefaultBranch, repoPath) - g.Expect(err).NotTo(HaveOccurred()) - - sshURL := server.SSHAddress() - repoURL := sshURL + "/" + repoPath - - // Fetch host key. - u, err := url.Parse(sshURL) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - - knownHosts, err := ssh.ScanHostKey(u.Host, timeout, tt.ClientHostKeyAlgos, tt.hashHostNames) - g.Expect(err).ToNot(HaveOccurred()) - - // No authentication is required for this test, but it is - // used here to make the Checkout logic happy. 
- kp, err := ssh.GenerateKeyPair(ssh.ED25519) - g.Expect(err).ToNot(HaveOccurred()) - - authOpts := &git.AuthOptions{ - Identity: kp.PrivateKey, - KnownHosts: knownHosts, - } - authOpts.TransportOptionsURL = getTransportOptionsURL(git.SSH) - - // Prepare for checkout. - branchCheckoutStrat := &CheckoutBranch{Branch: git.DefaultBranch} - tmpDir := t.TempDir() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - // Checkout the repo. - _, err = branchCheckoutStrat.Checkout(ctx, tmpDir, repoURL, authOpts) - g.Expect(err).Error().ShouldNot(HaveOccurred()) - }) - } -} diff --git a/pkg/git/libgit2/checkout_test.go b/pkg/git/libgit2/checkout_test.go deleted file mode 100644 index 0f9bb316e..000000000 --- a/pkg/git/libgit2/checkout_test.go +++ /dev/null @@ -1,707 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package libgit2 - -import ( - "context" - "errors" - "fmt" - "math/rand" - "os" - "path/filepath" - "testing" - "time" - - "github.com/fluxcd/pkg/gittestserver" - git2go "github.com/libgit2/git2go/v33" - . 
"github.com/onsi/gomega" - - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" -) - -func TestMain(m *testing.M) { - err := managed.InitManagedTransport() - if err != nil { - panic(fmt.Sprintf("failed to initialize libgit2 managed transport: %s", err)) - } - code := m.Run() - os.Exit(code) -} - -func TestCheckoutBranch_Checkout(t *testing.T) { - // we use a HTTP Git server instead of a bare repo (for all tests in this - // package), because our managed transports don't support the file protocol, - // so we wouldn't actually be using our custom transports, if we used a bare - // repo. - server, err := gittestserver.NewTempGitServer() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(server.Root()) - - err = server.StartHTTP() - if err != nil { - t.Fatal(err) - } - defer server.StopHTTP() - - repoPath := "test.git" - err = server.InitRepo("../testdata/git/repo", git.DefaultBranch, repoPath) - if err != nil { - t.Fatal(err) - } - - repo, err := git2go.OpenRepository(filepath.Join(server.Root(), repoPath)) - if err != nil { - t.Fatal(err) - } - defer repo.Free() - - defaultBranch := "master" - - firstCommit, err := commitFile(repo, "branch", "init", time.Now()) - if err != nil { - t.Fatal(err) - } - - // Branch off on first commit - if err = createBranch(repo, "test", nil); err != nil { - t.Fatal(err) - } - - // Create second commit on default branch - secondCommit, err := commitFile(repo, "branch", "second", time.Now()) - if err != nil { - t.Fatal(err) - } - repoURL := server.HTTPAddress() + "/" + repoPath - - tests := []struct { - name string - branch string - filesCreated map[string]string - lastRevision string - expectedCommit string - expectedConcreteCommit bool - expectedErr string - }{ - { - name: "Default branch", - branch: defaultBranch, - filesCreated: map[string]string{"branch": "second"}, - expectedCommit: secondCommit.String(), - 
expectedConcreteCommit: true, - }, - { - name: "Other branch", - branch: "test", - filesCreated: map[string]string{"branch": "init"}, - expectedCommit: firstCommit.String(), - expectedConcreteCommit: true, - }, - { - name: "Non existing branch", - branch: "invalid", - expectedErr: "reference 'refs/remotes/origin/invalid' not found", - expectedConcreteCommit: true, - }, - { - name: "skip clone - lastRevision hasn't changed", - branch: defaultBranch, - filesCreated: map[string]string{"branch": "second"}, - lastRevision: fmt.Sprintf("%s/%s", defaultBranch, secondCommit.String()), - expectedCommit: secondCommit.String(), - expectedConcreteCommit: false, - }, - { - name: "lastRevision is different", - branch: defaultBranch, - filesCreated: map[string]string{"branch": "second"}, - lastRevision: fmt.Sprintf("%s/%s", defaultBranch, firstCommit.String()), - expectedCommit: secondCommit.String(), - expectedConcreteCommit: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - branch := CheckoutBranch{ - Branch: tt.branch, - LastRevision: tt.lastRevision, - } - - tmpDir := t.TempDir() - authOpts := git.AuthOptions{ - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - - cc, err := branch.Checkout(context.TODO(), tmpDir, repoURL, &authOpts) - if tt.expectedErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) - g.Expect(cc).To(BeNil()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.branch + "/" + tt.expectedCommit)) - g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectedConcreteCommit)) - - if tt.expectedConcreteCommit { - for k, v := range tt.filesCreated { - g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v)) - } - } - }) - } -} - -func TestCheckoutTag_Checkout(t *testing.T) { - type testTag struct { - name string - annotated bool - } - - tests 
:= []struct { - name string - tagsInRepo []testTag - checkoutTag string - lastRevTag string - expectErr string - expectConcreteCommit bool - }{ - { - name: "Tag", - tagsInRepo: []testTag{{"tag-1", false}}, - checkoutTag: "tag-1", - expectConcreteCommit: true, - }, - { - name: "Annotated", - tagsInRepo: []testTag{{"annotated", true}}, - checkoutTag: "annotated", - expectConcreteCommit: true, - }, - { - name: "Non existing tag", - checkoutTag: "invalid", - expectErr: "unable to find 'invalid': no reference found for shorthand 'invalid'", - }, - { - name: "Skip clone - last revision unchanged", - tagsInRepo: []testTag{{"tag-1", false}}, - checkoutTag: "tag-1", - lastRevTag: "tag-1", - expectConcreteCommit: false, - }, - { - name: "Last revision changed", - tagsInRepo: []testTag{{"tag-1", false}, {"tag-2", false}}, - checkoutTag: "tag-2", - lastRevTag: "tag-1", - expectConcreteCommit: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(server.Root()) - - err = server.StartHTTP() - g.Expect(err).ToNot(HaveOccurred()) - defer server.StopHTTP() - - repoPath := "test.git" - err = server.InitRepo("../testdata/git/repo", git.DefaultBranch, repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - repo, err := git2go.OpenRepository(filepath.Join(server.Root(), repoPath)) - g.Expect(err).ToNot(HaveOccurred()) - defer repo.Free() - - // Collect tags and their associated commit for later reference. - tagCommits := map[string]*git2go.Commit{} - - repoURL := server.HTTPAddress() + "/" + repoPath - - // Populate the repo with commits and tags. 
- if tt.tagsInRepo != nil { - for _, tr := range tt.tagsInRepo { - var commit *git2go.Commit - c, err := commitFile(repo, "tag", tr.name, time.Now()) - if err != nil { - t.Fatal(err) - } - if commit, err = repo.LookupCommit(c); err != nil { - t.Fatal(err) - } - _, err = tag(repo, commit.Id(), tr.annotated, tr.name, time.Now()) - if err != nil { - t.Fatal(err) - } - tagCommits[tr.name] = commit - } - } - - checkoutTag := CheckoutTag{ - Tag: tt.checkoutTag, - } - // If last revision is provided, configure it. - if tt.lastRevTag != "" { - lc := tagCommits[tt.lastRevTag] - checkoutTag.LastRevision = fmt.Sprintf("%s/%s", tt.lastRevTag, lc.Id().String()) - } - - tmpDir := t.TempDir() - - authOpts := git.AuthOptions{ - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - cc, err := checkoutTag.Checkout(context.TODO(), tmpDir, repoURL, &authOpts) - if tt.expectErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.expectErr)) - g.Expect(cc).To(BeNil()) - return - } - - // Check successful checkout results. - targetTagCommit := tagCommits[tt.checkoutTag] - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.checkoutTag + "/" + targetTagCommit.Id().String())) - g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectConcreteCommit)) - - // Check file content only when there's an actual checkout. 
- if tt.lastRevTag != tt.checkoutTag { - g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.checkoutTag)) - } - }) - } -} - -func TestCheckoutCommit_Checkout(t *testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(server.Root()) - - err = server.StartHTTP() - if err != nil { - t.Fatal(err) - } - defer server.StopHTTP() - - repoPath := "test.git" - err = server.InitRepo("../testdata/git/repo", git.DefaultBranch, repoPath) - if err != nil { - t.Fatal(err) - } - - repo, err := git2go.OpenRepository(filepath.Join(server.Root(), repoPath)) - if err != nil { - t.Fatal(err) - } - defer repo.Free() - - c, err := commitFile(repo, "commit", "init", time.Now()) - if err != nil { - t.Fatal(err) - } - if _, err = commitFile(repo, "commit", "second", time.Now()); err != nil { - t.Fatal(err) - } - tmpDir := t.TempDir() - authOpts := git.AuthOptions{ - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - repoURL := server.HTTPAddress() + "/" + repoPath - - commit := CheckoutCommit{ - Commit: c.String(), - } - - cc, err := commit.Checkout(context.TODO(), tmpDir, repoURL, &authOpts) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc).ToNot(BeNil()) - g.Expect(cc.String()).To(Equal("HEAD/" + c.String())) - g.Expect(filepath.Join(tmpDir, "commit")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "commit"))).To(BeEquivalentTo("init")) - - commit = CheckoutCommit{ - Commit: "4dc3185c5fc94eb75048376edeb44571cece25f4", - } - tmpDir2 := t.TempDir() - - cc, err = commit.Checkout(context.TODO(), tmpDir2, repoURL, &authOpts) - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(HavePrefix("git checkout error: git commit '4dc3185c5fc94eb75048376edeb44571cece25f4' not found:")) - g.Expect(cc).To(BeNil()) -} - -func TestCheckoutSemVer_Checkout(t *testing.T) { - g := NewWithT(t) - now := 
time.Now() - - tags := []struct { - tag string - annotated bool - commitTime time.Time - tagTime time.Time - }{ - { - tag: "v0.0.1", - annotated: false, - commitTime: now, - }, - { - tag: "v0.1.0+build-1", - annotated: true, - commitTime: now.Add(10 * time.Minute), - tagTime: now.Add(2 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "v0.1.0+build-2", - annotated: false, - commitTime: now.Add(30 * time.Minute), - }, - { - tag: "v0.1.0+build-3", - annotated: true, - commitTime: now.Add(1 * time.Hour), - tagTime: now.Add(1 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "0.2.0", - annotated: true, - commitTime: now, - tagTime: now, - }, - } - tests := []struct { - name string - constraint string - expectErr error - expectTag string - }{ - { - name: "Orders by SemVer", - constraint: ">0.1.0", - expectTag: "0.2.0", - }, - { - name: "Orders by SemVer and timestamp", - constraint: "<0.2.0", - expectTag: "v0.1.0+build-3", - }, - { - name: "Errors without match", - constraint: ">=1.0.0", - expectErr: errors.New("no match found for semver: >=1.0.0"), - }, - } - - server, err := gittestserver.NewTempGitServer() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(server.Root()) - - err = server.StartHTTP() - if err != nil { - t.Fatal(err) - } - defer server.StopHTTP() - - repoPath := "test.git" - err = server.InitRepo("../testdata/git/repo", git.DefaultBranch, repoPath) - if err != nil { - t.Fatal(err) - } - - repo, err := git2go.OpenRepository(filepath.Join(server.Root(), repoPath)) - if err != nil { - t.Fatal(err) - } - defer repo.Free() - repoURL := server.HTTPAddress() + "/" + repoPath - - refs := make(map[string]string, len(tags)) - for _, tt := range tags { - ref, err := commitFile(repo, "tag", tt.tag, tt.commitTime) - if err != nil { - t.Fatal(err) - } - commit, err := repo.LookupCommit(ref) - if err != nil { - t.Fatal(err) - } - defer commit.Free() - refs[tt.tag] = commit.Id().String() - _, err = 
tag(repo, ref, tt.annotated, tt.tag, tt.tagTime) - if err != nil { - t.Fatal(err) - } - } - - c, err := repo.Tags.List() - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(c).To(HaveLen(len(tags))) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - semVer := CheckoutSemVer{ - SemVer: tt.constraint, - } - - tmpDir := t.TempDir() - authOpts := git.AuthOptions{ - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - - cc, err := semVer.Checkout(context.TODO(), tmpDir, repoURL, &authOpts) - if tt.expectErr != nil { - g.Expect(err).To(Equal(tt.expectErr)) - g.Expect(cc).To(BeNil()) - return - } - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.expectTag + "/" + refs[tt.expectTag])) - g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.expectTag)) - }) - } -} - -func Test_initializeRepoWithRemote(t *testing.T) { - g := NewWithT(t) - - tmp := t.TempDir() - ctx := context.TODO() - testRepoURL := "https://example.com/foo/bar" - testRepoURL2 := "https://example.com/foo/baz" - authOpts, err := git.AuthOptionsWithoutSecret(testRepoURL) - g.Expect(err).ToNot(HaveOccurred()) - authOpts.TransportOptionsURL = "https://bar123" - authOpts2, err := git.AuthOptionsWithoutSecret(testRepoURL2) - g.Expect(err).ToNot(HaveOccurred()) - authOpts2.TransportOptionsURL = "https://baz789" - - // Fresh initialization. - repo, remote, err := initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(repo.IsBare()).To(BeFalse()) - g.Expect(remote.Name()).To(Equal(defaultRemoteName)) - g.Expect(remote.Url()).To(Equal(authOpts.TransportOptionsURL)) - remote.Free() - repo.Free() - - // Reinitialize to ensure it reuses the existing origin. 
- repo, remote, err = initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(repo.IsBare()).To(BeFalse()) - g.Expect(remote.Name()).To(Equal(defaultRemoteName)) - g.Expect(remote.Url()).To(Equal(authOpts.TransportOptionsURL)) - remote.Free() - repo.Free() - - // Reinitialize with a different remote URL for existing origin. - repo, remote, err = initializeRepoWithRemote(ctx, tmp, testRepoURL2, authOpts2) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(repo.IsBare()).To(BeFalse()) - g.Expect(remote.Name()).To(Equal(defaultRemoteName)) - g.Expect(remote.Url()).To(Equal(authOpts2.TransportOptionsURL)) - remote.Free() - repo.Free() -} - -func TestCheckoutStrategyForOptions(t *testing.T) { - tests := []struct { - name string - opts git.CheckoutOptions - expectedStrat git.CheckoutStrategy - }{ - { - name: "commit works", - opts: git.CheckoutOptions{ - Commit: "commit", - }, - expectedStrat: &CheckoutCommit{ - Commit: "commit", - }, - }, - { - name: "semver works", - opts: git.CheckoutOptions{ - SemVer: ">= 1.0.0", - }, - expectedStrat: &CheckoutSemVer{ - SemVer: ">= 1.0.0", - }, - }, - { - name: "tag with latest revision works", - opts: git.CheckoutOptions{ - Tag: "v0.1.0", - LastRevision: "ar34oi2njrngjrng", - }, - expectedStrat: &CheckoutTag{ - Tag: "v0.1.0", - LastRevision: "ar34oi2njrngjrng", - }, - }, - { - name: "branch with latest revision works", - opts: git.CheckoutOptions{ - Branch: "main", - LastRevision: "rrgij20mkmrg", - }, - expectedStrat: &CheckoutBranch{ - Branch: "main", - LastRevision: "rrgij20mkmrg", - }, - }, - { - name: "empty branch falls back to default", - opts: git.CheckoutOptions{}, - expectedStrat: &CheckoutBranch{ - Branch: git.DefaultBranch, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - strat := CheckoutStrategyForOptions(context.TODO(), tt.opts) - g.Expect(strat).To(Equal(tt.expectedStrat)) - }) - } -} - -func initBareRepo(t 
*testing.T) (*git2go.Repository, error) { - tmpDir := t.TempDir() - repo, err := git2go.InitRepository(tmpDir, true) - if err != nil { - return nil, err - } - return repo, nil -} - -func createBranch(repo *git2go.Repository, branch string, commit *git2go.Commit) error { - if commit == nil { - var err error - commit, err = headCommit(repo) - if err != nil { - return err - } - defer commit.Free() - } - _, err := repo.CreateBranch(branch, commit, false) - return err -} - -func commitFile(repo *git2go.Repository, path, content string, time time.Time) (*git2go.Oid, error) { - var parentC []*git2go.Commit - head, err := headCommit(repo) - if err == nil { - defer head.Free() - parentC = append(parentC, head) - } - - index, err := repo.Index() - if err != nil { - return nil, err - } - defer index.Free() - - blobOID, err := repo.CreateBlobFromBuffer([]byte(content)) - if err != nil { - return nil, err - } - - entry := &git2go.IndexEntry{ - Mode: git2go.FilemodeBlob, - Id: blobOID, - Path: path, - } - - if err := index.Add(entry); err != nil { - return nil, err - } - if err := index.Write(); err != nil { - return nil, err - } - - treeID, err := index.WriteTree() - if err != nil { - return nil, err - } - - tree, err := repo.LookupTree(treeID) - if err != nil { - return nil, err - } - defer tree.Free() - - c, err := repo.CreateCommit("HEAD", mockSignature(time), mockSignature(time), "Committing "+path, tree, parentC...) 
- if err != nil { - return nil, err - } - return c, nil -} - -func tag(repo *git2go.Repository, cId *git2go.Oid, annotated bool, tag string, time time.Time) (*git2go.Oid, error) { - commit, err := repo.LookupCommit(cId) - if err != nil { - return nil, err - } - if annotated { - return repo.Tags.Create(tag, commit, mockSignature(time), fmt.Sprintf("Annotated tag for %s", tag)) - } - return repo.Tags.CreateLightweight(tag, commit, false) -} - -func mockSignature(time time.Time) *git2go.Signature { - return &git2go.Signature{ - Name: "Jane Doe", - Email: "author@example.com", - When: time, - } -} - -func getTransportOptionsURL(transport git.TransportType) string { - letterRunes := []rune("abcdefghijklmnopqrstuvwxyz1234567890") - b := make([]rune, 10) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(transport) + "://" + string(b) -} diff --git a/pkg/git/libgit2/managed/http.go b/pkg/git/libgit2/managed/http.go deleted file mode 100644 index e79aefd47..000000000 --- a/pkg/git/libgit2/managed/http.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -/* -This was inspired and contains part of: -https://github.com/libgit2/git2go/blob/eae00773cce87d5282a8ac7c10b5c1961ee6f9cb/http.go - -The MIT License - -Copyright (c) 2013 The git2go contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
-*/ - -package managed - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/fluxcd/pkg/runtime/logger" - pool "github.com/fluxcd/source-controller/internal/transport" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/go-logr/logr" - git2go "github.com/libgit2/git2go/v33" - ctrl "sigs.k8s.io/controller-runtime" -) - -var actionSuffixes = []string{ - "/info/refs?service=git-upload-pack", - "/git-upload-pack", - "/info/refs?service=git-receive-pack", - "/git-receive-pack", -} - -// registerManagedHTTP registers a Go-native implementation of an -// HTTP(S) transport that doesn't rely on any lower-level libraries -// such as OpenSSL. -func registerManagedHTTP() error { - for _, protocol := range []string{"http", "https"} { - _, err := git2go.NewRegisteredSmartTransport(protocol, true, httpSmartSubtransportFactory) - if err != nil { - return fmt.Errorf("failed to register transport for %q: %v", protocol, err) - } - } - return nil -} - -func httpSmartSubtransportFactory(remote *git2go.Remote, transport *git2go.Transport) (git2go.SmartSubtransport, error) { - sst := &httpSmartSubtransport{ - transport: transport, - httpTransport: pool.NewOrIdle(nil), - ctx: context.Background(), - logger: logr.Discard(), - } - - return sst, nil -} - -type httpSmartSubtransport struct { - transport *git2go.Transport - httpTransport *http.Transport - - // once is used to ensure that logger and ctx is set only once, - // on the initial (or only) Action call. Without this a mutex must - // be applied to ensure that ctx won't be changed, as this would be - // prone to race conditions in the stdout processing goroutine. - once sync.Once - // ctx defines the context to be used across long-running or - // cancellable operations. - // Defaults to context.Background(). 
- ctx context.Context - // logger keeps a Logger instance for logging. This was preferred - // due to the need to have a correlation ID and URL set and - // reused across all log calls. - // If context is not set, this defaults to logr.Discard(). - logger logr.Logger -} - -func (t *httpSmartSubtransport) Action(transportOptionsURL string, action git2go.SmartServiceAction) (git2go.SmartSubtransportStream, error) { - opts, found := getTransportOptions(transportOptionsURL) - - if !found { - return nil, fmt.Errorf("failed to create client: could not find transport options for the object: %s", transportOptionsURL) - } - targetURL := opts.TargetURL - - if targetURL == "" { - return nil, fmt.Errorf("repository URL cannot be empty") - } - - if len(targetURL) > URLMaxLength { - return nil, fmt.Errorf("URL exceeds the max length (%d)", URLMaxLength) - } - - var proxyFn func(*http.Request) (*url.URL, error) - proxyOpts := opts.ProxyOptions - if proxyOpts != nil { - switch proxyOpts.Type { - case git2go.ProxyTypeNone: - proxyFn = nil - case git2go.ProxyTypeAuto: - proxyFn = http.ProxyFromEnvironment - case git2go.ProxyTypeSpecified: - parsedUrl, err := url.Parse(proxyOpts.Url) - if err != nil { - return nil, err - } - proxyFn = http.ProxyURL(parsedUrl) - } - t.httpTransport.Proxy = proxyFn - t.httpTransport.ProxyConnectHeader = map[string][]string{} - } else { - t.httpTransport.Proxy = nil - } - t.httpTransport.DisableCompression = false - - t.once.Do(func() { - if opts.Context != nil { - t.ctx = opts.Context - t.logger = ctrl.LoggerFrom(t.ctx, - "transportType", "http", - "url", opts.TargetURL) - } - }) - - client, req, err := createClientRequest(targetURL, action, t.httpTransport, opts.AuthOpts) - if err != nil { - return nil, err - } - - stream := newManagedHttpStream(t, req, client) - if req.Method == "POST" { - stream.recvReply.Add(1) - stream.sendRequestBackground() - } - - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - if len(via) >= 3 { - 
return fmt.Errorf("too many redirects") - } - - // golang will change POST to GET in case of redirects. - if len(via) >= 0 && req.Method != via[0].Method { - if via[0].URL.Scheme == "https" && req.URL.Scheme == "http" { - return fmt.Errorf("downgrade from https to http is not allowed: from %q to %q", via[0].URL.String(), req.URL.String()) - } - if via[0].URL.Host != req.URL.Host { - return fmt.Errorf("cross hosts redirects are not allowed: from %s to %s", via[0].URL.Host, req.URL.Host) - } - - return http.ErrUseLastResponse - } - - // Some Git servers (i.e. Gitlab) only support redirection on the GET operations. - // Therefore, on the initial GET operation we update the target URL to include the - // new target, so the subsequent actions include the correct target URL. - // Example of this is trying to access a Git repository without the .git suffix. - if req.Response != nil { - if newURL, err := req.Response.Location(); err == nil && newURL != nil { - if strings.EqualFold(newURL.Host, req.URL.Host) && strings.EqualFold(newURL.Port(), req.URL.Port()) { - opts, _ := getTransportOptions(transportOptionsURL) - if opts == nil { - opts = &TransportOptions{} - } - - opts.TargetURL = trimActionSuffix(newURL.String()) - AddTransportOptions(transportOptionsURL, *opts) - - // show as info, as this should be visible regardless of the - // chosen log-level. 
- t.logger.Info("server responded with redirect", - "newUrl", opts.TargetURL, "StatusCode", req.Response.StatusCode) - } - } - } - - return nil - } - - return stream, nil -} - -func trimActionSuffix(url string) string { - newUrl := url - for _, s := range actionSuffixes { - newUrl = strings.TrimSuffix(newUrl, s) - } - - return newUrl -} - -func createClientRequest(targetURL string, action git2go.SmartServiceAction, - t *http.Transport, authOpts *git.AuthOptions) (*http.Client, *http.Request, error) { - var req *http.Request - var err error - - if t == nil { - return nil, nil, fmt.Errorf("failed to create client: transport cannot be nil") - } - - client := &http.Client{ - Transport: t, - Timeout: fullHttpClientTimeOut, - } - - switch action { - case git2go.SmartServiceActionUploadpackLs: - req, err = http.NewRequest("GET", targetURL+"/info/refs?service=git-upload-pack", nil) - - case git2go.SmartServiceActionUploadpack: - req, err = http.NewRequest("POST", targetURL+"/git-upload-pack", nil) - if err != nil { - break - } - req.Header.Set("Content-Type", "application/x-git-upload-pack-request") - if t.Proxy != nil { - t.ProxyConnectHeader.Set("Content-Type", "application/x-git-upload-pack-request") - } - - case git2go.SmartServiceActionReceivepackLs: - req, err = http.NewRequest("GET", targetURL+"/info/refs?service=git-receive-pack", nil) - - case git2go.SmartServiceActionReceivepack: - req, err = http.NewRequest("POST", targetURL+"/git-receive-pack", nil) - if err != nil { - break - } - req.Header.Set("Content-Type", "application/x-git-receive-pack-request") - if t.Proxy != nil { - t.ProxyConnectHeader.Set("Content-Type", "application/x-git-receive-pack-request") - } - - default: - err = errors.New("unknown action") - } - - if err != nil { - return nil, nil, err - } - - // Apply authentication and TLS settings to the HTTP transport. 
- if authOpts != nil { - if authOpts.Username != "" && authOpts.Password != "" { - req.SetBasicAuth(authOpts.Username, authOpts.Password) - } - if len(authOpts.CAFile) > 0 { - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(authOpts.CAFile); !ok { - return nil, nil, fmt.Errorf("PEM CA bundle could not be appended to x509 certificate pool") - } - t.TLSClientConfig = &tls.Config{ - RootCAs: certPool, - } - } - } - - req.Header.Set("User-Agent", "git/2.0 (flux-libgit2)") - if t.Proxy != nil { - t.ProxyConnectHeader.Set("User-Agent", "git/2.0 (flux-libgit2)") - } - return client, req, nil -} - -func (t *httpSmartSubtransport) Close() error { - t.logger.V(logger.TraceLevel).Info("httpSmartSubtransport.Close()") - return nil -} - -func (t *httpSmartSubtransport) Free() { - t.logger.V(logger.TraceLevel).Info("httpSmartSubtransport.Free()") - - if t.httpTransport != nil { - t.logger.V(logger.TraceLevel).Info("release http transport back to pool") - - pool.Release(t.httpTransport) - t.httpTransport = nil - } -} - -type httpSmartSubtransportStream struct { - owner *httpSmartSubtransport - client *http.Client - req *http.Request - resp *http.Response - reader *io.PipeReader - writer *io.PipeWriter - sentRequest bool - recvReply sync.WaitGroup - httpError error - m sync.RWMutex -} - -func newManagedHttpStream(owner *httpSmartSubtransport, req *http.Request, client *http.Client) *httpSmartSubtransportStream { - r, w := io.Pipe() - return &httpSmartSubtransportStream{ - owner: owner, - client: client, - req: req, - reader: r, - writer: w, - } -} - -func (self *httpSmartSubtransportStream) Read(buf []byte) (int, error) { - if !self.sentRequest { - self.recvReply.Add(1) - if err := self.sendRequest(); err != nil { - return 0, err - } - } - - if err := self.writer.Close(); err != nil { - return 0, err - } - - self.recvReply.Wait() - - self.m.RLock() - err := self.httpError - self.m.RUnlock() - - if err != nil { - return 0, self.httpError - } - return 
self.resp.Body.Read(buf) -} - -func (self *httpSmartSubtransportStream) Write(buf []byte) (int, error) { - self.m.RLock() - err := self.httpError - self.m.RUnlock() - - if err != nil { - return 0, self.httpError - } - return self.writer.Write(buf) -} - -func (self *httpSmartSubtransportStream) Free() { - if self.resp != nil { - self.owner.logger.V(logger.TraceLevel).Info("httpSmartSubtransportStream.Free()") - - if self.resp.Body != nil { - // ensure body is fully processed and closed - // for increased likelihood of transport reuse in HTTP/1.x. - // it should not be a problem to do this more than once. - if _, err := io.Copy(io.Discard, self.resp.Body); err != nil { - self.owner.logger.V(logger.TraceLevel).Error(err, "cannot discard response body") - } - - if err := self.resp.Body.Close(); err != nil { - self.owner.logger.V(logger.TraceLevel).Error(err, "cannot close response body") - } - } - } -} - -func (self *httpSmartSubtransportStream) sendRequestBackground() { - go func() { - err := self.sendRequest() - - self.m.Lock() - self.httpError = err - self.m.Unlock() - }() - self.sentRequest = true -} - -func (self *httpSmartSubtransportStream) sendRequest() error { - defer self.recvReply.Done() - self.resp = nil - - var resp *http.Response - var err error - var content []byte - - for { - req := &http.Request{ - Method: self.req.Method, - URL: self.req.URL, - Header: self.req.Header, - } - req = req.WithContext(self.owner.ctx) - - if req.Method == "POST" { - if len(content) == 0 { - // a copy of the request body needs to be saved so - // it can be reused in case of redirects. - if content, err = io.ReadAll(self.reader); err != nil { - return err - } - } - req.Body = io.NopCloser(bytes.NewReader(content)) - req.ContentLength = -1 - } - - self.owner.logger.V(logger.TraceLevel).Info("new request", "method", req.Method, "postUrl", req.URL) - resp, err = self.client.Do(req) - if err != nil { - return err - } - - // GET requests will be automatically redirected. 
- // POST require the new destination, and also the body content. - if req.Method == "POST" && resp.StatusCode >= 301 && resp.StatusCode <= 308 { - // ensure body is fully processed and closed - // for increased likelihood of transport reuse in HTTP/1.x. - _, _ = io.Copy(io.Discard, resp.Body) // errors can be safely ignored - - if err := resp.Body.Close(); err != nil { - return err - } - - // The next try will go against the new destination - self.req.URL, err = resp.Location() - if err != nil { - return err - } - - continue - } - - // for HTTP 200, the response will be cleared up by Free() - if resp.StatusCode == http.StatusOK { - break - } - - // ensure body is fully processed and closed - // for increased likelihood of transport reuse in HTTP/1.x. - _, _ = io.Copy(io.Discard, resp.Body) // errors can be safely ignored - if err := resp.Body.Close(); err != nil { - return err - } - - return fmt.Errorf("unhandled HTTP error %s", resp.Status) - } - - self.resp = resp - self.sentRequest = true - return nil -} diff --git a/pkg/git/libgit2/managed/http_test.go b/pkg/git/libgit2/managed/http_test.go deleted file mode 100644 index fc957cbe3..000000000 --- a/pkg/git/libgit2/managed/http_test.go +++ /dev/null @@ -1,292 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package managed - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - "testing" - - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/source-controller/pkg/git" - . "github.com/onsi/gomega" - - git2go "github.com/libgit2/git2go/v33" -) - -func TestMain(m *testing.M) { - err := InitManagedTransport() - if err != nil { - panic(fmt.Sprintf("failed to initialize libgit2 managed transport: %s", err)) - } - code := m.Run() - os.Exit(code) -} - -func TestHttpAction_CreateClientRequest(t *testing.T) { - authOpts := git.AuthOptions{ - Username: "user", - Password: "pwd", - } - url := "https://final-target/abc" - - tests := []struct { - name string - assertFunc func(g *WithT, req *http.Request, client *http.Client) - action git2go.SmartServiceAction - authOpts git.AuthOptions - transport *http.Transport - wantedErr error - }{ - { - name: "Uploadpack: URL, method and headers are correctly set", - action: git2go.SmartServiceActionUploadpack, - transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - ProxyConnectHeader: map[string][]string{}, - }, - assertFunc: func(g *WithT, req *http.Request, _ *http.Client) { - g.Expect(req.URL.String()).To(Equal("https://final-target/abc/git-upload-pack")) - g.Expect(req.Method).To(Equal("POST")) - g.Expect(req.Header).To(BeEquivalentTo(map[string][]string{ - "User-Agent": {"git/2.0 (flux-libgit2)"}, - "Content-Type": {"application/x-git-upload-pack-request"}, - })) - }, - wantedErr: nil, - }, - { - name: "UploadpackLs: URL, method and headers are correctly set", - action: git2go.SmartServiceActionUploadpackLs, - transport: &http.Transport{}, - assertFunc: func(g *WithT, req *http.Request, _ *http.Client) { - g.Expect(req.URL.String()).To(Equal("https://final-target/abc/info/refs?service=git-upload-pack")) - g.Expect(req.Method).To(Equal("GET")) - g.Expect(req.Header).To(BeEquivalentTo(map[string][]string{ - "User-Agent": {"git/2.0 (flux-libgit2)"}, - })) 
- }, - wantedErr: nil, - }, - { - name: "Receivepack: URL, method and headers are correctly set", - action: git2go.SmartServiceActionReceivepack, - transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - ProxyConnectHeader: map[string][]string{}, - }, - assertFunc: func(g *WithT, req *http.Request, _ *http.Client) { - g.Expect(req.URL.String()).To(Equal("https://final-target/abc/git-receive-pack")) - g.Expect(req.Method).To(Equal("POST")) - g.Expect(req.Header).To(BeEquivalentTo(map[string][]string{ - "Content-Type": {"application/x-git-receive-pack-request"}, - "User-Agent": {"git/2.0 (flux-libgit2)"}, - })) - }, - wantedErr: nil, - }, - { - name: "ReceivepackLs: URL, method and headars are correctly set", - action: git2go.SmartServiceActionReceivepackLs, - transport: &http.Transport{}, - assertFunc: func(g *WithT, req *http.Request, _ *http.Client) { - g.Expect(req.URL.String()).To(Equal("https://final-target/abc/info/refs?service=git-receive-pack")) - g.Expect(req.Method).To(Equal("GET")) - g.Expect(req.Header).To(BeEquivalentTo(map[string][]string{ - "User-Agent": {"git/2.0 (flux-libgit2)"}, - })) - }, - wantedErr: nil, - }, - { - name: "incomplete credentials, no basic auth", - action: git2go.SmartServiceActionReceivepackLs, - transport: &http.Transport{}, - authOpts: git.AuthOptions{Username: "user"}, - assertFunc: func(g *WithT, req *http.Request, client *http.Client) { - _, _, ok := req.BasicAuth() - g.Expect(ok).To(BeFalse()) - }, - }, - { - name: "credentials are correctly configured", - action: git2go.SmartServiceActionUploadpack, - transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - ProxyConnectHeader: map[string][]string{}, - }, - authOpts: authOpts, - assertFunc: func(g *WithT, req *http.Request, client *http.Client) { - g.Expect(req.URL.String()).To(Equal("https://final-target/abc/git-upload-pack")) - g.Expect(req.Method).To(Equal("POST")) - - username, pwd, ok := req.BasicAuth() - if !ok { - t.Errorf("could not find 
Authentication header in request.") - } - g.Expect(username).To(Equal("user")) - g.Expect(pwd).To(Equal("pwd")) - }, - wantedErr: nil, - }, - { - name: "error when no http.transport provided", - action: git2go.SmartServiceActionUploadpack, - transport: nil, - wantedErr: fmt.Errorf("failed to create client: transport cannot be nil"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - client, req, err := createClientRequest(url, tt.action, tt.transport, &tt.authOpts) - if err != nil { - t.Log(err) - } - if tt.wantedErr != nil { - g.Expect(err).To(Equal(tt.wantedErr)) - } else { - tt.assertFunc(g, req, client) - } - - }) - } -} - -func TestHTTP_E2E(t *testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(server.Root()) - - user := "test-user" - pwd := "test-pswd" - server.Auth(user, pwd) - server.KeyDir(filepath.Join(server.Root(), "keys")) - - err = server.StartHTTP() - g.Expect(err).ToNot(HaveOccurred()) - defer server.StopHTTP() - - repoPath := "test.git" - err = server.InitRepo("../../testdata/git/repo", git.DefaultBranch, repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - tmpDir := t.TempDir() - - // Register the auth options and target url mapped to a unique url. - id := "http://obj-id" - AddTransportOptions(id, TransportOptions{ - TargetURL: server.HTTPAddress() + "/" + repoPath, - AuthOpts: &git.AuthOptions{ - Username: user, - Password: pwd, - }, - }) - - // We call git2go.Clone with transportOptsURL instead of the actual URL, - // as the transport action will fetch the actual URL and the required - // credentials using the it as an identifier. 
- repo, err := git2go.Clone(id, tmpDir, &git2go.CloneOptions{ - CheckoutOptions: git2go.CheckoutOptions{ - Strategy: git2go.CheckoutForce, - }, - }) - g.Expect(err).ToNot(HaveOccurred()) - repo.Free() -} - -func TestTrimActionSuffix(t *testing.T) { - tests := []struct { - name string - inURL string - wantURL string - }{ - { - name: "ignore other suffixes", - inURL: "https://gitlab/repo/podinfo.git/somethingelse", - wantURL: "https://gitlab/repo/podinfo.git/somethingelse", - }, - { - name: "trim /info/refs?service=git-upload-pack", - inURL: "https://gitlab/repo/podinfo.git/info/refs?service=git-upload-pack", - wantURL: "https://gitlab/repo/podinfo.git", - }, - { - name: "trim /git-upload-pack", - inURL: "https://gitlab/repo/podinfo.git/git-upload-pack", - wantURL: "https://gitlab/repo/podinfo.git", - }, - { - name: "trim /info/refs?service=git-receive-pack", - inURL: "https://gitlab/repo/podinfo.git/info/refs?service=git-receive-pack", - wantURL: "https://gitlab/repo/podinfo.git", - }, - { - name: "trim /git-receive-pack", - inURL: "https://gitlab/repo/podinfo.git/git-receive-pack", - wantURL: "https://gitlab/repo/podinfo.git", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - gotURL := trimActionSuffix(tt.inURL) - g.Expect(gotURL).To(Equal(tt.wantURL)) - }) - } -} - -func TestHTTP_HandleRedirect(t *testing.T) { - tests := []struct { - name string - repoURL string - }{ - {name: "http to https", repoURL: "http://github.com/stefanprodan/podinfo"}, - {name: "handle gitlab redirect", repoURL: "https://gitlab.com/stefanprodan/podinfo"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - tmpDir := t.TempDir() - - id := "http://obj-id" - AddTransportOptions(id, TransportOptions{ - TargetURL: tt.repoURL, - }) - - // GitHub will cause a 301 and redirect to https - repo, err := git2go.Clone(id, tmpDir, &git2go.CloneOptions{ - CheckoutOptions: git2go.CheckoutOptions{ - 
Strategy: git2go.CheckoutForce, - }, - }) - - g.Expect(err).ToNot(HaveOccurred()) - repo.Free() - }) - } -} diff --git a/pkg/git/libgit2/managed/init.go b/pkg/git/libgit2/managed/init.go deleted file mode 100644 index f452f1142..000000000 --- a/pkg/git/libgit2/managed/init.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package managed - -import ( - "sync" - "time" -) - -var ( - once sync.Once - - // sshConnectionTimeOut defines the timeout used for when - // creating ssh.ClientConfig, which translates in the timeout - // for stablishing the SSH TCP connections. - sshConnectionTimeOut time.Duration = 30 * time.Second - - // fullHttpClientTimeOut defines the maximum amount of - // time a http client may take before timing out, - // regardless of the current operation (i.e. connection, - // handshake, put/get). - fullHttpClientTimeOut time.Duration = 10 * time.Minute - - enabled bool -) - -// Enabled defines whether the use of Managed Transport is enabled which -// is only true if InitManagedTransport was called successfully at least -// once. -// -// This is only affects git operations that uses libgit2 implementation. -func Enabled() bool { - return enabled -} - -// InitManagedTransport initialises HTTP(S) and SSH managed transport -// for git2go, and therefore only impact git operations using the -// libgit2 implementation. 
-// -// This must run after git2go.init takes place, hence this is not executed -// within a init(). -// Regardless of the state in libgit2/git2go, this will replace the -// built-in transports. -// -// This function will only register managed transports once, subsequent calls -// leads to no-op. -func InitManagedTransport() error { - var err error - - once.Do(func() { - if err = registerManagedHTTP(); err != nil { - return - } - - if err = registerManagedSSH(); err == nil { - enabled = true - } - }) - - return err -} diff --git a/pkg/git/libgit2/managed/options.go b/pkg/git/libgit2/managed/options.go deleted file mode 100644 index cd9d96b0c..000000000 --- a/pkg/git/libgit2/managed/options.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package managed - -import ( - "context" - "sync" - - "github.com/fluxcd/source-controller/pkg/git" - git2go "github.com/libgit2/git2go/v33" -) - -// TransportOptions represents options to be applied at transport-level -// at request time. -type TransportOptions struct { - TargetURL string - AuthOpts *git.AuthOptions - ProxyOptions *git2go.ProxyOptions - Context context.Context -} - -var ( - // transportOpts maps a unique URL to a set of transport options. 
- transportOpts = make(map[string]TransportOptions, 0) - m sync.RWMutex -) - -// AddTransportOptions registers a TransportOptions object mapped to the -// provided transportOptsURL, which must be a valid URL, i.e. prefixed with "http://" -// or "ssh://", as it is used as a dummy URL for all git operations and the managed -// transports will only be invoked for the protocols that they have been -// registered for. -func AddTransportOptions(transportOptsURL string, opts TransportOptions) { - m.Lock() - transportOpts[transportOptsURL] = opts - m.Unlock() -} - -// RemoveTransportOptions removes the registerd TransportOptions object -// mapped to the provided id. -func RemoveTransportOptions(transportOptsURL string) { - m.Lock() - delete(transportOpts, transportOptsURL) - m.Unlock() -} - -func getTransportOptions(transportOptsURL string) (*TransportOptions, bool) { - m.RLock() - opts, found := transportOpts[transportOptsURL] - m.RUnlock() - - if found { - return &opts, true - } - return nil, false -} diff --git a/pkg/git/libgit2/managed/options_test.go b/pkg/git/libgit2/managed/options_test.go deleted file mode 100644 index 4f35a0fcd..000000000 --- a/pkg/git/libgit2/managed/options_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package managed - -import ( - "testing" - - "github.com/fluxcd/source-controller/pkg/git" - . 
"github.com/onsi/gomega" -) - -func TestTransportOptions(t *testing.T) { - tests := []struct { - name string - registerOpts bool - url string - opts TransportOptions - expectOpts bool - expectedOpts *TransportOptions - }{ - { - name: "return registered option", - registerOpts: true, - url: "https://target/?123", - opts: TransportOptions{}, - expectOpts: true, - expectedOpts: &TransportOptions{}, - }, - { - name: "match registered options", - registerOpts: true, - url: "https://target/?876", - opts: TransportOptions{ - TargetURL: "https://new-target/321", - AuthOpts: &git.AuthOptions{ - CAFile: []byte{123, 213, 132}, - }, - }, - expectOpts: true, - expectedOpts: &TransportOptions{ - TargetURL: "https://new-target/321", - AuthOpts: &git.AuthOptions{ - CAFile: []byte{123, 213, 132}, - }, - }, - }, - { - name: "ignore when options not registered", - registerOpts: false, - url: "", - opts: TransportOptions{}, - expectOpts: false, - expectedOpts: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - if tt.registerOpts { - AddTransportOptions(tt.url, tt.opts) - } - - opts, found := getTransportOptions(tt.url) - g.Expect(found).To(Equal(found)) - - if tt.expectOpts { - g.Expect(tt.expectedOpts).To(Equal(opts)) - } - - if tt.registerOpts { - RemoveTransportOptions(tt.url) - } - - _, found = getTransportOptions(tt.url) - g.Expect(found).To(BeFalse()) - }) - } -} diff --git a/pkg/git/libgit2/managed/ssh.go b/pkg/git/libgit2/managed/ssh.go deleted file mode 100644 index 5081241bc..000000000 --- a/pkg/git/libgit2/managed/ssh.go +++ /dev/null @@ -1,386 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -This was inspired and contains part of: -https://github.com/libgit2/git2go/blob/eae00773cce87d5282a8ac7c10b5c1961ee6f9cb/ssh.go - -The MIT License - -Copyright (c) 2013 The git2go contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
-*/ - -package managed - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "io" - "net" - "net/url" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/crypto/ssh" - "golang.org/x/net/proxy" - ctrl "sigs.k8s.io/controller-runtime" - - "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/go-logr/logr" - git2go "github.com/libgit2/git2go/v33" -) - -// registerManagedSSH registers a Go-native implementation of -// SSH transport that doesn't rely on any lower-level libraries -// such as libssh2. -func registerManagedSSH() error { - for _, protocol := range []string{"ssh", "ssh+git", "git+ssh"} { - _, err := git2go.NewRegisteredSmartTransport(protocol, false, sshSmartSubtransportFactory) - if err != nil { - return fmt.Errorf("failed to register transport for %q: %v", protocol, err) - } - } - return nil -} - -func sshSmartSubtransportFactory(remote *git2go.Remote, transport *git2go.Transport) (git2go.SmartSubtransport, error) { - var closed int32 = 0 - return &sshSmartSubtransport{ - transport: transport, - ctx: context.Background(), - logger: logr.Discard(), - closedSessions: &closed, - }, nil -} - -type sshSmartSubtransport struct { - transport *git2go.Transport - - // once is used to ensure that logger and ctx is set only once, - // on the initial (or only) Action call. Without this a mutex must - // be applied to ensure that ctx won't be changed, as this would be - // prone to race conditions in the stdout processing goroutine. - once sync.Once - // ctx defines the context to be used across long-running or - // cancellable operations. - // Defaults to context.Background(). - ctx context.Context - // logger keeps a Logger instance for logging. This was preferred - // due to the need to have a correlation ID and Address set and - // reused across all log calls. - // If context is not set, this defaults to logr.Discard(). 
- logger logr.Logger - - lastAction git2go.SmartServiceAction - stdin io.WriteCloser - stdout io.Reader - - closedSessions *int32 - - client *ssh.Client - session *ssh.Session - currentStream *sshSmartSubtransportStream - connected bool -} - -func (t *sshSmartSubtransport) Action(transportOptionsURL string, action git2go.SmartServiceAction) (git2go.SmartSubtransportStream, error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - opts, found := getTransportOptions(transportOptionsURL) - if !found { - return nil, fmt.Errorf("could not find transport options for object: %s", transportOptionsURL) - } - - u, err := url.Parse(opts.TargetURL) - if err != nil { - return nil, err - } - - if len(u.Path) > PathMaxLength { - return nil, fmt.Errorf("path exceeds the max length (%d)", PathMaxLength) - } - - // decode URI's path - uPath, err := url.PathUnescape(u.Path) - if err != nil { - return nil, err - } - - // Escape \ and '. - uPath = strings.Replace(uPath, `\`, `\\`, -1) - uPath = strings.Replace(uPath, `'`, `\'`, -1) - - var cmd string - switch action { - case git2go.SmartServiceActionUploadpackLs, git2go.SmartServiceActionUploadpack: - if t.currentStream != nil { - if t.lastAction == git2go.SmartServiceActionUploadpackLs { - return t.currentStream, nil - } - } - cmd = fmt.Sprintf("git-upload-pack '%s'", uPath) - - case git2go.SmartServiceActionReceivepackLs, git2go.SmartServiceActionReceivepack: - if t.currentStream != nil { - if t.lastAction == git2go.SmartServiceActionReceivepackLs { - return t.currentStream, nil - } - } - cmd = fmt.Sprintf("git-receive-pack '%s'", uPath) - - default: - return nil, fmt.Errorf("unexpected action: %v", action) - } - - port := "22" - if u.Port() != "" { - port = u.Port() - } - addr := net.JoinHostPort(u.Hostname(), port) - - t.once.Do(func() { - if opts.Context != nil { - t.ctx = opts.Context - t.logger = ctrl.LoggerFrom(t.ctx, - "transportType", "ssh", - "addr", addr) - } - }) - - sshConfig, err := 
createClientConfig(opts.AuthOpts) - if err != nil { - return nil, err - } - - sshConfig.HostKeyCallback = func(hostname string, remote net.Addr, key ssh.PublicKey) error { - keyHash := sha256.Sum256(key.Marshal()) - return CheckKnownHost(hostname, opts.AuthOpts.KnownHosts, keyHash[:]) - } - - if t.connected { - // The connection is no longer shared across actions, so ensures - // all has been released before starting a new connection. - _ = t.Close() - } - - err = t.createConn(addr, sshConfig) - if err != nil { - return nil, err - } - - t.logger.V(logger.TraceLevel).Info("creating new ssh session") - if t.session, err = t.client.NewSession(); err != nil { - return nil, err - } - - if t.stdin, err = t.session.StdinPipe(); err != nil { - return nil, err - } - - var w *io.PipeWriter - var reader io.Reader - t.stdout, w = io.Pipe() - if reader, err = t.session.StdoutPipe(); err != nil { - return nil, err - } - - // If the session's stdout pipe is not serviced fast - // enough it may cause the remote command to block. - // - // xref: https://github.com/golang/crypto/blob/eb4f295cb31f7fb5d52810411604a2638c9b19a2/ssh/session.go#L553-L558 - go func() error { - defer func() { - w.Close() - - // In case this goroutine panics, handle recovery. - if r := recover(); r != nil { - t.logger.V(logger.TraceLevel).Error(errors.New(r.(string)), - "recovered from libgit2 ssh smart subtransport panic") - } - }() - var cancel context.CancelFunc - ctx := t.ctx - - // When context is nil, creates a new with internal SSH connection timeout. 
- if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), sshConnectionTimeOut) - defer cancel() - } - - closedAlready := atomic.LoadInt32(t.closedSessions) - for { - select { - case <-ctx.Done(): - t.Close() - return nil - - default: - if atomic.LoadInt32(t.closedSessions) > closedAlready { - return nil - } - - _, err := io.Copy(w, reader) - if err != nil { - return err - } - time.Sleep(5 * time.Millisecond) - } - } - }() - - t.logger.V(logger.TraceLevel).Info("run on remote", "cmd", cmd) - if err := t.session.Start(cmd); err != nil { - return nil, err - } - - t.lastAction = action - t.currentStream = &sshSmartSubtransportStream{ - owner: t, - } - - return t.currentStream, nil -} - -func (t *sshSmartSubtransport) createConn(addr string, sshConfig *ssh.ClientConfig) error { - ctx, cancel := context.WithTimeout(context.TODO(), sshConnectionTimeOut) - defer cancel() - - t.logger.V(logger.TraceLevel).Info("dial connection") - conn, err := proxy.Dial(ctx, "tcp", addr) - if err != nil { - return err - } - c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig) - if err != nil { - return err - } - - t.connected = true - t.client = ssh.NewClient(c, chans, reqs) - - return nil -} - -// Close closes the smart subtransport. -// -// This is called internally ahead of a new action, and also -// upstream by the transport handler: -// https://github.com/libgit2/git2go/blob/0e8009f00a65034d196c67b1cdd82af6f12c34d3/transport.go#L409 -// -// Avoid returning errors, but focus on releasing anything that -// may impair the transport to have successful actions on a new -// SmartSubTransport (i.e. unreleased resources, staled connections). 
-func (t *sshSmartSubtransport) Close() error { - t.logger.V(logger.TraceLevel).Info("sshSmartSubtransport.Close()") - - t.currentStream = nil - if t.client != nil && t.stdin != nil { - _ = t.stdin.Close() - } - t.stdin = nil - - if t.session != nil { - t.logger.V(logger.TraceLevel).Info("session.Close()") - _ = t.session.Close() - } - t.session = nil - - if t.client != nil { - _ = t.client.Close() - t.logger.V(logger.TraceLevel).Info("close client") - } - t.client = nil - - t.connected = false - atomic.AddInt32(t.closedSessions, 1) - - return nil -} - -func (t *sshSmartSubtransport) Free() { -} - -type sshSmartSubtransportStream struct { - owner *sshSmartSubtransport -} - -func (stream *sshSmartSubtransportStream) Read(buf []byte) (int, error) { - return stream.owner.stdout.Read(buf) -} - -func (stream *sshSmartSubtransportStream) Write(buf []byte) (int, error) { - return stream.owner.stdin.Write(buf) -} - -func (stream *sshSmartSubtransportStream) Free() { -} - -func createClientConfig(authOpts *git.AuthOptions) (*ssh.ClientConfig, error) { - if authOpts == nil { - return nil, fmt.Errorf("cannot create ssh client config from nil ssh auth options") - } - - var signer ssh.Signer - var err error - if authOpts.Password != "" { - signer, err = ssh.ParsePrivateKeyWithPassphrase(authOpts.Identity, []byte(authOpts.Password)) - } else { - signer, err = ssh.ParsePrivateKey(authOpts.Identity) - } - if err != nil { - return nil, err - } - - cfg := &ssh.ClientConfig{ - User: authOpts.Username, - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - Timeout: sshConnectionTimeOut, - } - - if len(git.KexAlgos) > 0 { - cfg.Config.KeyExchanges = git.KexAlgos - } - if len(git.HostKeyAlgos) > 0 { - cfg.HostKeyAlgorithms = git.HostKeyAlgos - } - - return cfg, nil -} diff --git a/pkg/git/libgit2/managed/ssh_test.go b/pkg/git/libgit2/managed/ssh_test.go deleted file mode 100644 index 4e575f4e4..000000000 --- a/pkg/git/libgit2/managed/ssh_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/* 
-Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package managed - -import ( - "net/url" - "os" - "path/filepath" - "testing" - "time" - - "github.com/fluxcd/pkg/ssh" - "github.com/fluxcd/source-controller/pkg/git" - . "github.com/onsi/gomega" - - "github.com/fluxcd/pkg/gittestserver" - git2go "github.com/libgit2/git2go/v33" -) - -func TestSSHAction_clientConfig(t *testing.T) { - kp, err := ssh.GenerateKeyPair(ssh.RSA_4096) - if err != nil { - t.Fatalf("could not generate keypair: %s", err) - } - tests := []struct { - name string - authOpts *git.AuthOptions - expectedUsername string - expectedAuthLen int - expectErr string - }{ - { - name: "nil SSHTransportOptions returns an error", - authOpts: nil, - expectErr: "cannot create ssh client config from nil ssh auth options", - }, - { - name: "valid SSHTransportOptions returns a valid SSHClientConfig", - authOpts: &git.AuthOptions{ - Identity: kp.PrivateKey, - Username: "user", - }, - expectedUsername: "user", - expectedAuthLen: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - cfg, err := createClientConfig(tt.authOpts) - if tt.expectErr != "" { - g.Expect(tt.expectErr).To(Equal(err.Error())) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cfg.User).To(Equal(tt.expectedUsername)) - g.Expect(len(cfg.Auth)).To(Equal(tt.expectedAuthLen)) - }) - } -} - -func TestSSH_E2E(t 
*testing.T) { - g := NewWithT(t) - - server, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(server.Root()) - - server.KeyDir(filepath.Join(server.Root(), "keys")) - - err = server.ListenSSH() - g.Expect(err).ToNot(HaveOccurred()) - - go func() { - server.StartSSH() - }() - defer server.StopSSH() - - kp, err := ssh.NewEd25519Generator().Generate() - g.Expect(err).ToNot(HaveOccurred()) - - repoPath := "test.git" - err = server.InitRepo("../../testdata/git/repo", git.DefaultBranch, repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - u, err := url.Parse(server.SSHAddress()) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(u.Host).ToNot(BeEmpty()) - knownhosts, err := ssh.ScanHostKey(u.Host, 5*time.Second, git.HostKeyAlgos, false) - g.Expect(err).NotTo(HaveOccurred()) - - transportOptsURL := "ssh://git@fake-url" - sshAddress := server.SSHAddress() + "/" + repoPath - AddTransportOptions(transportOptsURL, TransportOptions{ - TargetURL: sshAddress, - AuthOpts: &git.AuthOptions{ - Username: "user", - Identity: kp.PrivateKey, - KnownHosts: knownhosts, - }, - }) - - tmpDir := t.TempDir() - - // We call git2go.Clone with transportOptsURL, so that the managed ssh transport can - // fetch the correct set of credentials and the actual target url as well. 
- repo, err := git2go.Clone(transportOptsURL, tmpDir, &git2go.CloneOptions{ - FetchOptions: git2go.FetchOptions{ - RemoteCallbacks: RemoteCallbacks(), - }, - CheckoutOptions: git2go.CheckoutOptions{ - Strategy: git2go.CheckoutForce, - }, - }) - - g.Expect(err).ToNot(HaveOccurred()) - repo.Free() -} diff --git a/pkg/git/libgit2/managed/transport.go b/pkg/git/libgit2/managed/transport.go deleted file mode 100644 index ba4c5b338..000000000 --- a/pkg/git/libgit2/managed/transport.go +++ /dev/null @@ -1,103 +0,0 @@ -package managed - -import ( - "encoding/base64" - "fmt" - "net" - - pkgkh "github.com/fluxcd/pkg/ssh/knownhosts" - git2go "github.com/libgit2/git2go/v33" - "golang.org/x/crypto/ssh/knownhosts" -) - -// knownHostCallback returns a CertificateCheckCallback that verifies -// the key of Git server against the given host and known_hosts for -// git.SSH Transports. -func KnownHostsCallback(host string, knownHosts []byte) git2go.CertificateCheckCallback { - return func(cert *git2go.Certificate, valid bool, hostname string) error { - // First, attempt to split the configured host and port to validate - // the port-less hostname given to the callback. - hostWithoutPort, _, err := net.SplitHostPort(host) - if err != nil { - // SplitHostPort returns an error if the host is missing - // a port, assume the host has no port. - hostWithoutPort = host - } - - // Different versions of libgit handle this differently. - // This fixes the case in which ports may be sent back. 
- hostnameWithoutPort, _, err := net.SplitHostPort(hostname) - if err != nil { - hostnameWithoutPort = hostname - } - - if hostnameWithoutPort != hostWithoutPort { - return fmt.Errorf("host mismatch: %q %q", hostWithoutPort, hostnameWithoutPort) - } - - var fingerprint []byte - switch { - case cert.Hostkey.Kind&git2go.HostkeySHA256 > 0: - fingerprint = cert.Hostkey.HashSHA256[:] - default: - return fmt.Errorf("invalid host key kind, expected to be of kind SHA256") - } - - return CheckKnownHost(host, knownHosts, fingerprint) - } -} - -// CheckKnownHost checks whether the host being connected to is -// part of the known_hosts, and if so, it ensures the host -// fingerprint matches the fingerprint of the known host with -// the same name. -func CheckKnownHost(host string, knownHosts []byte, fingerprint []byte) error { - kh, err := pkgkh.ParseKnownHosts(string(knownHosts)) - if err != nil { - return fmt.Errorf("failed to parse known_hosts: %w", err) - } - - if len(kh) == 0 { - return fmt.Errorf("hostkey verification aborted: no known_hosts found") - } - - // We are now certain that the configured host and the hostname - // given to the callback match. Use the configured host (that - // includes the port), and normalize it, so we can check if there - // is an entry for the hostname _and_ port. - h := knownhosts.Normalize(host) - for _, k := range kh { - if k.Matches(h, fingerprint) { - return nil - } - } - return fmt.Errorf("no entries in known_hosts match host '%s' with fingerprint '%s'", - h, base64.RawStdEncoding.EncodeToString(fingerprint)) -} - -// RemoteCallbacks constructs git2go.RemoteCallbacks with dummy callbacks. -func RemoteCallbacks() git2go.RemoteCallbacks { - // This may not be fully removed as without some of the callbacks git2go - // gets anxious and panics. 
- return git2go.RemoteCallbacks{ - CredentialsCallback: credentialsCallback(), - CertificateCheckCallback: certificateCallback(), - } -} - -// credentialsCallback constructs a dummy CredentialsCallback. -func credentialsCallback() git2go.CredentialsCallback { - return func(url string, username string, allowedTypes git2go.CredentialType) (*git2go.Credential, error) { - // If credential is nil, panic will ensue. We fake it as managed transport does not - // require it. - return git2go.NewCredentialUserpassPlaintext("", "") - } -} - -// certificateCallback constructs a dummy CertificateCallback. -func certificateCallback() git2go.CertificateCheckCallback { - // returning a nil func can cause git2go to panic. - return func(cert *git2go.Certificate, valid bool, hostname string) error { - return nil - } -} diff --git a/pkg/git/libgit2/managed/transport_test.go b/pkg/git/libgit2/managed/transport_test.go deleted file mode 100644 index 7e68cd4d0..000000000 --- a/pkg/git/libgit2/managed/transport_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package managed - -import ( - "encoding/base64" - "fmt" - "testing" - - git2go "github.com/libgit2/git2go/v33" - . "github.com/onsi/gomega" -) - -// knownHostsFixture is known_hosts fixture in the expected -// format. 
-var knownHostsFixture = `github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== -github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= -github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl -` - -// To fetch latest knownhosts for source.developers.google.com run: -// ssh-keyscan -p 2022 source.developers.google.com -// -// Expected hash (used in the cases) can get found with: -// ssh-keyscan -p 2022 source.developers.google.com | ssh-keygen -l -f - -var knownHostsFixtureWithPort = `[source.developers.google.com]:2022 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB5Iy4/cq/gt/fPqe3uyMy4jwv1Alc94yVPxmnwNhBzJqEV5gRPiRk5u4/JJMbbu9QUVAguBABxL7sBZa5PH/xY=` - -// This is an incorrect known hosts entry, that does not aligned with -// the normalized format and therefore won't match. 
-var knownHostsFixtureUnormalized = `source.developers.google.com:2022 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB5Iy4/cq/gt/fPqe3uyMy4jwv1Alc94yVPxmnwNhBzJqEV5gRPiRk5u4/JJMbbu9QUVAguBABxL7sBZa5PH/xY=` - -func TestKnownHostsCallback(t *testing.T) { - tests := []struct { - name string - host string - expectedHost string - knownHosts []byte - hostkey git2go.HostkeyCertificate - want error - }{ - { - name: "Empty", - host: "source.developers.google.com", - knownHosts: []byte(""), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("AGvEpqYNMqsRNIviwyk4J4HM0lEylomDBKOWZsBn434")}, - expectedHost: "source.developers.google.com:2022", - want: fmt.Errorf("hostkey verification aborted: no known_hosts found"), - }, - { - name: "Mismatch incorrect known_hosts", - host: "source.developers.google.com", - knownHosts: []byte(knownHostsFixtureUnormalized), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("AGvEpqYNMqsRNIviwyk4J4HM0lEylomDBKOWZsBn434")}, - expectedHost: "source.developers.google.com:2022", - want: fmt.Errorf("no entries in known_hosts match host '[source.developers.google.com]:2022' with fingerprint 'AGvEpqYNMqsRNIviwyk4J4HM0lEylomDBKOWZsBn434'"), - }, - { - name: "Match when host has port", - host: "source.developers.google.com:2022", - knownHosts: []byte(knownHostsFixtureWithPort), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("AGvEpqYNMqsRNIviwyk4J4HM0lEylomDBKOWZsBn434")}, - expectedHost: "source.developers.google.com:2022", - want: nil, - }, - { - name: "Match even when host does not have port", - host: "source.developers.google.com", - knownHosts: []byte(knownHostsFixtureWithPort), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("AGvEpqYNMqsRNIviwyk4J4HM0lEylomDBKOWZsBn434")}, - expectedHost: "source.developers.google.com:2022", - want: nil, - }, - 
{ - name: "Match", - host: "github.com", - knownHosts: []byte(knownHostsFixture), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8")}, - expectedHost: "github.com", - want: nil, - }, - { - name: "Match with port", - host: "github.com", - knownHosts: []byte(knownHostsFixture), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8")}, - expectedHost: "github.com:22", - want: nil, - }, - { - // Test case to specifically detect a regression introduced in v0.25.0 - // Ref: https://github.com/fluxcd/image-automation-controller/issues/378 - name: "Match regardless of order of known_hosts", - host: "github.com", - knownHosts: []byte(knownHostsFixture), - // Use ecdsa-sha2-nistp256 instead of ssh-rsa - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("p2QAMXNIC1TJYWeIOttrVc98/R1BUFWu3/LiyKgUfQM")}, - expectedHost: "github.com:22", - want: nil, - }, - { - name: "Hostname mismatch", - host: "github.com", - knownHosts: []byte(knownHostsFixture), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8")}, - expectedHost: "example.com", - want: fmt.Errorf("host mismatch: %q %q", "example.com", "github.com"), - }, - { - name: "Hostkey mismatch", - host: "github.com", - knownHosts: []byte(knownHostsFixture), - hostkey: git2go.HostkeyCertificate{Kind: git2go.HostkeySHA256, HashSHA256: sha256Fingerprint("ROQFvPThGrW4RuWLoL9tq9I9zJ42fK4XywyRtbOz/EQ")}, - expectedHost: "github.com", - want: fmt.Errorf("no entries in known_hosts match host 'github.com' with fingerprint 'ROQFvPThGrW4RuWLoL9tq9I9zJ42fK4XywyRtbOz/EQ'"), - }, 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - cert := &git2go.Certificate{Hostkey: tt.hostkey} - callback := KnownHostsCallback(tt.expectedHost, tt.knownHosts) - result := g.Expect(callback(cert, false, tt.host)) - if tt.want == nil { - result.To(BeNil()) - } else { - result.To(Equal(tt.want)) - } - }) - } -} - -func sha256Fingerprint(in string) [32]byte { - d, err := base64.RawStdEncoding.DecodeString(in) - if err != nil { - panic(err) - } - var out [32]byte - copy(out[:], d) - return out -} diff --git a/pkg/git/options.go b/pkg/git/options.go deleted file mode 100644 index a9169a590..000000000 --- a/pkg/git/options.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package git - -import ( - "fmt" - "net/url" - - v1 "k8s.io/api/core/v1" -) - -const ( - DefaultOrigin = "origin" - DefaultBranch = "master" - DefaultPublicKeyAuthUser = "git" -) - -// CheckoutOptions are the options used for a Git checkout. -type CheckoutOptions struct { - // Branch to checkout, can be combined with Branch with some - // Implementations. - Branch string - - // Tag to checkout, takes precedence over Branch. - Tag string - - // SemVer tag expression to checkout, takes precedence over Tag. - SemVer string `json:"semver,omitempty"` - - // Commit SHA1 to checkout, takes precedence over Tag and SemVer, - // can be combined with Branch with some Implementations. 
- Commit string - - // RecurseSubmodules defines if submodules should be checked out, - // not supported by all Implementations. - RecurseSubmodules bool - - // LastRevision holds the last observed revision of the local repository. - // It is used to skip clone operations when no changes were detected. - LastRevision string -} - -type TransportType string - -const ( - SSH TransportType = "ssh" - HTTPS TransportType = "https" - HTTP TransportType = "http" -) - -// AuthOptions are the authentication options for the Transport of -// communication with a remote origin. -type AuthOptions struct { - Transport TransportType - Host string - Username string - Password string - Identity []byte - KnownHosts []byte - CAFile []byte - // TransportOptionsURL is a unique identifier for this set of authentication - // options. It's used by managed libgit2 transports to uniquely identify - // which credentials to use for a particular Git operation, and avoid misuse - // of credentials in a multi-tenant environment. - // It must be prefixed with a valid transport protocol ("ssh:// "or "http://") because - // of the way managed transports are registered and invoked. - // It's a field of AuthOptions despite not providing any kind of authentication - // info, as it's the only way to sneak it into git.Checkout, without polluting - // it's args and keeping it generic. - TransportOptionsURL string -} - -// KexAlgos hosts the key exchange algorithms to be used for SSH connections. -// If empty, Go's default is used instead. -var KexAlgos []string - -// HostKeyAlgos holds the HostKey algorithms that the SSH client will advertise -// to the server. If empty, Go's default is used instead. -var HostKeyAlgos []string - -// Validate the AuthOptions against the defined Transport. 
-func (o AuthOptions) Validate() error { - switch o.Transport { - case HTTPS, HTTP: - if o.Username == "" && o.Password != "" { - return fmt.Errorf("invalid '%s' auth option: 'password' requires 'username' to be set", o.Transport) - } - case SSH: - if o.Host == "" { - return fmt.Errorf("invalid '%s' auth option: 'host' is required", o.Transport) - } - if len(o.Identity) == 0 { - return fmt.Errorf("invalid '%s' auth option: 'identity' is required", o.Transport) - } - if len(o.KnownHosts) == 0 { - return fmt.Errorf("invalid '%s' auth option: 'known_hosts' is required", o.Transport) - } - case "": - return fmt.Errorf("no transport type set") - default: - return fmt.Errorf("unknown transport '%s'", o.Transport) - } - return nil -} - -// AuthOptionsFromSecret constructs an AuthOptions object from the given Secret, -// and then validates the result. It returns the AuthOptions, or an error. -func AuthOptionsFromSecret(URL string, secret *v1.Secret) (*AuthOptions, error) { - if secret == nil { - return nil, fmt.Errorf("no secret provided to construct auth strategy from") - } - - u, err := url.Parse(URL) - if err != nil { - return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) - } - - opts := &AuthOptions{ - Transport: TransportType(u.Scheme), - Host: u.Host, - Username: string(secret.Data["username"]), - Password: string(secret.Data["password"]), - CAFile: secret.Data["caFile"], - Identity: secret.Data["identity"], - KnownHosts: secret.Data["known_hosts"], - } - if opts.Username == "" { - opts.Username = u.User.Username() - } - if opts.Username == "" { - opts.Username = DefaultPublicKeyAuthUser - } - - if err = opts.Validate(); err != nil { - return nil, err - } - - return opts, nil -} - -// AuthOptionsWithoutSecret constructs a minimal AuthOptions object from the -// given URL and then validates the result. It returns the AuthOptions, or an -// error. 
-func AuthOptionsWithoutSecret(URL string) (*AuthOptions, error) { - u, err := url.Parse(URL) - if err != nil { - return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) - } - - opts := &AuthOptions{ - Transport: TransportType(u.Scheme), - Host: u.Host, - } - - if err = opts.Validate(); err != nil { - return nil, err - } - - return opts, nil -} diff --git a/pkg/git/options_test.go b/pkg/git/options_test.go deleted file mode 100644 index 17defd94a..000000000 --- a/pkg/git/options_test.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package git - -import ( - "testing" - - . "github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" -) - -const ( - // privateKeyFixture is a randomly generated password less - // 512bit RSA private key. 
- privateKeyFixture = `-----BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQCrakELAKxozvwJijQEggYlTvS1QTZx1DaBwOhW/4kRSuR21plu -xuQeyuUiztoWeb9jgW7wjzG4j1PIJjdbsgjPIcIZ4PBY7JeEW+QRopfwuN8MHXNp -uTLgIHbkmhoOg5qBEcjzO/lEOOPpV0EmbObgqv3+wRmLJrgfzWl/cTtRewIDAQAB -AoGAawKFImpEN5Xn78iwWpQVZBsbV0AjzgHuGSiloxIZrorzf2DPHkHZzYNaclVx -/o/4tBTsfg7WumH3qr541qyZJDgU7iRMABwmx0v1vm2wQiX7NJzLzH2E9vlMC3mw -d8S99g9EqRuNH98XX8su34B9WGRPqiKvEm0RW8Hideo2/KkCQQDbs6rHcriKQyPB -paidHZAfguu0eVbyHT2EgLgRboWE+tEAqFEW2ycqNL3VPz9fRvwexbB6rpOcPpQJ -DEL4XB2XAkEAx7xJz8YlCQ2H38xggK8R8EUXF9Zhb0fqMJHMNmao1HCHVMtbsa8I -jR2EGyQ4CaIqNG5tdWukXQSJrPYDRWNvvQJAZX3rP7XUYDLB2twvN12HzbbKMhX3 -v2MYnxRjc9INpi/Dyzz2MMvOnOW+aDuOh/If2AtVCmeJUx1pf4CFk3viQwJBAKyC -t824+evjv+NQBlme3AOF6PgxtV4D4wWoJ5Uk/dTejER0j/Hbl6sqPxuiILRRV9qJ -Ngkgu4mLjc3RfenEhJECQAx8zjWUE6kHHPGAd9DfiAIQ4bChqnyS0Nwb9+Gd4hSE -P0Ah10mHiK/M0o3T8Eanwum0gbQHPnOwqZgsPkwXRqQ= ------END RSA PRIVATE KEY-----` - - // privateKeyPassphraseFixture is a randomly generated - // 512bit RSA private key with password foobar. - privateKeyPassphraseFixture = `-----BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: AES-256-CBC,0B016973B2A761D31E6B388D0F327C35 - -X9GET/qAyZkAJBl/RK+1XX75NxONgdUfZDw7PIYi/g+Efh3Z5zH5kh/dx9lxH5ZG -HGCqPAeMO/ofGDGtDULWW6iqDUFRu5gPgEVSCnnbqoHNU325WHhXdhejVAItwObC -IpL/zYfs2+gDHXct/n9FJ/9D/EGXZihwPqYaK8GQSfZAxz0QjLuh0wU1qpbm3y3N -q+o9FLv3b2Ys/tCJOUsYVQOYLSrZEI77y1ii3nWgQ8lXiTJbBUKzuq4f1YWeO8Ah -RZbdhTa57AF5lUaRtL7Nrm3HJUrK1alBbU7HHyjeW4Q4n/D3fiRDC1Mh2Bi4EOOn -wGctSx4kHsZGhJv5qwKqqPEFPhUzph8D2tm2TABk8HJa5KJFDbGrcfvk2uODAoZr -MbcpIxCfl8oB09bWfY6tDQjyvwSYYo2Phdwm7kT92xc= ------END RSA PRIVATE KEY-----` - - // knownHostsFixture is known_hosts fixture in the expected - // format. 
- knownHostsFixture = `github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==` -) - -func TestAuthOptions_Validate(t *testing.T) { - tests := []struct { - name string - opts AuthOptions - wantErr string - }{ - { - name: "HTTP transport with password requires user", - opts: AuthOptions{ - Transport: HTTP, - Password: "foo", - }, - wantErr: "invalid 'http' auth option: 'password' requires 'username' to be set", - }, - { - name: "Valid HTTP transport", - opts: AuthOptions{ - Transport: HTTP, - Username: "example", - Password: "foo", - }, - }, - { - name: "HTTPS transport with password requires user", - opts: AuthOptions{ - Transport: HTTPS, - Password: "foo", - }, - wantErr: "invalid 'https' auth option: 'password' requires 'username' to be set", - }, - { - name: "Valid HTTPS transport", - opts: AuthOptions{ - Transport: HTTPS, - Username: "example", - Password: "foo", - }, - }, - { - name: "Valid HTTPS without any config", - opts: AuthOptions{ - Transport: HTTPS, - }, - }, - { - name: "SSH transport requires host", - opts: AuthOptions{ - Transport: SSH, - }, - wantErr: "invalid 'ssh' auth option: 'host' is required", - }, - { - name: "SSH transport requires identity", - opts: AuthOptions{ - Transport: SSH, - Host: "github.com:22", - }, - wantErr: "invalid 'ssh' auth option: 'identity' is required", - }, - { - name: "SSH transport requires known_hosts", - opts: AuthOptions{ - Transport: SSH, - Host: "github.com:22", - Identity: []byte(privateKeyFixture), - }, - wantErr: "invalid 'ssh' auth option: 'known_hosts' is required", - }, - { - name: "Requires transport", - opts: AuthOptions{}, - wantErr: 
"no transport type set", - }, - { - name: "Valid SSH transport", - opts: AuthOptions{ - Host: "github.com:22", - Transport: SSH, - Identity: []byte(privateKeyPassphraseFixture), - Password: "foobar", - KnownHosts: []byte(knownHostsFixture), - }, - }, - { - name: "No transport", - opts: AuthOptions{}, - wantErr: "no transport type set", - }, - { - name: "Unknown transport", - opts: AuthOptions{ - Transport: "foo", - }, - wantErr: "unknown transport 'foo'", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - got := tt.opts.Validate() - if tt.wantErr != "" { - g.Expect(got.Error()).To(ContainSubstring(tt.wantErr)) - return - } - g.Expect(got).ToNot(HaveOccurred()) - }) - } -} - -func TestAuthOptionsFromSecret(t *testing.T) { - tests := []struct { - name string - URL string - secret *v1.Secret - wantFunc func(g *WithT, opts *AuthOptions, secret *v1.Secret) - wantErr string - }{ - { - name: "Sets values from Secret", - URL: "https://git@example.com", - secret: &v1.Secret{ - Data: map[string][]byte{ - "username": []byte("example"), // This takes precedence over the one from the URL - "password": []byte("secret"), - "identity": []byte(privateKeyFixture), - "known_hosts": []byte(knownHostsFixture), - "caFile": []byte("mock"), - }, - }, - wantFunc: func(g *WithT, opts *AuthOptions, secret *v1.Secret) { - g.Expect(opts.Username).To(Equal("example")) - g.Expect(opts.Password).To(Equal("secret")) - g.Expect(opts.Identity).To(BeEquivalentTo(privateKeyFixture)) - g.Expect(opts.KnownHosts).To(BeEquivalentTo(knownHostsFixture)) - g.Expect(opts.CAFile).To(BeEquivalentTo("mock")) - }, - }, - { - name: "Sets default user", - URL: "http://example.com", - secret: &v1.Secret{}, - wantFunc: func(g *WithT, opts *AuthOptions, secret *v1.Secret) { - g.Expect(opts.Username).To(Equal(DefaultPublicKeyAuthUser)) - }, - }, - { - name: "Sets transport from URL", - URL: "http://git@example.com", - secret: &v1.Secret{}, - wantFunc: func(g 
*WithT, opts *AuthOptions, secret *v1.Secret) { - g.Expect(opts.Transport).To(Equal(HTTP)) - }, - }, - { - name: "Sets user from URL", - URL: "http://example@example.com", - secret: &v1.Secret{ - Data: map[string][]byte{ - "password": []byte("secret"), - }, - }, - wantFunc: func(g *WithT, opts *AuthOptions, secret *v1.Secret) { - g.Expect(opts.Username).To(Equal("example")) - g.Expect(opts.Password).To(Equal("secret")) - }, - }, - { - name: "Validates options", - URL: "ssh://example.com", - secret: &v1.Secret{ - Data: map[string][]byte{ - "identity": []byte(privateKeyFixture), - }, - }, - wantErr: "invalid 'ssh' auth option: 'known_hosts' is required", - }, - { - name: "Errors without secret", - secret: nil, - wantErr: "no secret provided to construct auth strategy from", - }, - { - name: "Errors on malformed URL", - URL: ":example", - secret: &v1.Secret{}, - wantErr: "failed to parse URL to determine auth strategy", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - got, err := AuthOptionsFromSecret(tt.URL, tt.secret) - if tt.wantErr != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeNil()) - return - } - - g.Expect(err).To(BeNil()) - if tt.wantFunc != nil { - tt.wantFunc(g, got, tt.secret) - } - }) - } -} diff --git a/pkg/git/strategy/proxy/strategy_proxy_test.go b/pkg/git/strategy/proxy/strategy_proxy_test.go deleted file mode 100644 index 0006e1187..000000000 --- a/pkg/git/strategy/proxy/strategy_proxy_test.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proxy - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "os" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/elazarl/goproxy" - "github.com/fluxcd/pkg/gittestserver" - . "github.com/onsi/gomega" - - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/gogit" - "github.com/fluxcd/source-controller/pkg/git/libgit2" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" - "github.com/fluxcd/source-controller/pkg/git/strategy" -) - -// These tests are run in a different _test.go file because go-git uses the ProxyFromEnvironment function of the net/http package -// which caches the Proxy settings, hence not including other tests in the same file ensures a clean proxy setup for the tests to run. -func TestCheckoutStrategyForImplementation_Proxied(t *testing.T) { - managed.InitManagedTransport() - - type cleanupFunc func() - - type testCase struct { - name string - gitImpl git.Implementation - url string - branch string - setupGitProxy func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) - shortTimeout bool - wantUsedProxy bool - wantError bool - } - - g := NewWithT(t) - - // Get a free port for proxy to use. 
- l, err := net.Listen("tcp", ":0") - g.Expect(err).ToNot(HaveOccurred()) - proxyAddr := fmt.Sprintf("localhost:%d", l.Addr().(*net.TCPAddr).Port) - g.Expect(l.Close()).ToNot(HaveOccurred()) - - cases := []testCase{ - { - name: "gogit_HTTP_PROXY", - gitImpl: gogit.Implementation, - url: "http://example.com/bar/test-reponame", - branch: "main", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - // Create the git server. - gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - - username := "test-user" - password := "test-password" - gitServer.Auth(username, password) - gitServer.KeyDir(gitServer.Root()) - - g.Expect(gitServer.StartHTTP()).ToNot(HaveOccurred()) - - // Initialize a git repo. - err = gitServer.InitRepo("../testdata/repo1", "main", "bar/test-reponame") - g.Expect(err).ToNot(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - g.Expect(err).ToNot(HaveOccurred()) - - // The request is being forwarded to the local test git server in this handler. - var proxyHandler goproxy.FuncReqHandler = func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) { - userAgent := req.Header.Get("User-Agent") - if strings.Contains(req.Host, "example.com") && strings.Contains(userAgent, "git") { - atomic.AddInt32(proxiedRequests, 1) - req.Host = u.Host - req.URL.Host = req.Host - return req, nil - } - // Reject if it isnt our request. 
- return req, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusForbidden, "") - } - proxy.OnRequest().Do(proxyHandler) - - return &git.AuthOptions{ - Transport: git.HTTP, - Username: username, - Password: password, - }, func() { - os.RemoveAll(gitServer.Root()) - gitServer.StopHTTP() - } - }, - shortTimeout: false, - wantUsedProxy: true, - wantError: false, - }, - { - name: "gogit_HTTPS_PROXY", - gitImpl: gogit.Implementation, - url: "https://github.com/git-fixtures/basic", - branch: "master", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - var proxyHandler goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) { - // We don't check for user agent as this handler is only going to process CONNECT requests, and because Go's net/http - // is the one making such a request on behalf of go-git, adding a check for the go net/http user agent (Go-http-client) - // would only allow false positives from any request originating from Go's net/http. - if strings.Contains(host, "github.com") { - atomic.AddInt32(proxiedRequests, 1) - return goproxy.OkConnect, host - } - // Reject if it isnt our request. - return goproxy.RejectConnect, host - } - proxy.OnRequest().HandleConnect(proxyHandler) - - // go-git does not allow to use an HTTPS proxy and a custom root CA at the same time. - // See https://github.com/fluxcd/source-controller/pull/524#issuecomment-1006673163. 
- return nil, func() {} - }, - shortTimeout: false, - wantUsedProxy: true, - wantError: false, - }, - { - name: "gogit_NO_PROXY", - gitImpl: gogit.Implementation, - url: "https://192.0.2.1/bar/test-reponame", - branch: "main", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - var proxyHandler goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) { - // We shouldn't hit the proxy so we just want to check for any interaction, then reject. - atomic.AddInt32(proxiedRequests, 1) - return goproxy.RejectConnect, host - } - proxy.OnRequest().HandleConnect(proxyHandler) - - return nil, func() {} - }, - shortTimeout: true, - wantUsedProxy: false, - wantError: true, - }, - { - name: "libgit2_HTTPS_PROXY", - gitImpl: libgit2.Implementation, - url: "https://example.com/bar/test-reponame", - branch: "main", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - // Create the git server. - gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - - username := "test-user" - password := "test-password" - gitServer.Auth(username, password) - gitServer.KeyDir(gitServer.Root()) - - // Start the HTTPS server. - examplePublicKey, err := os.ReadFile("../testdata/certs/server.pem") - g.Expect(err).ToNot(HaveOccurred()) - examplePrivateKey, err := os.ReadFile("../testdata/certs/server-key.pem") - g.Expect(err).ToNot(HaveOccurred()) - exampleCA, err := os.ReadFile("../testdata/certs/ca.pem") - g.Expect(err).ToNot(HaveOccurred()) - err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - g.Expect(err).ToNot(HaveOccurred()) - - // Initialize a git repo. 
- repoPath := "bar/test-reponame" - err = gitServer.InitRepo("../testdata/repo1", "main", repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - g.Expect(err).ToNot(HaveOccurred()) - - // The request is being forwarded to the local test git server in this handler. - // The certificate used here is valid for both example.com and localhost. - var proxyHandler goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) { - defer managed.RemoveTransportOptions("https://example.com/bar/test-reponame") - // Check if the host matches with the git server address and the user-agent is the expected git client. - userAgent := ctx.Req.Header.Get("User-Agent") - if strings.Contains(host, "example.com") && strings.Contains(userAgent, "libgit2") { - atomic.AddInt32(proxiedRequests, 1) - return goproxy.OkConnect, u.Host - } - // Reject if it isn't our request. - return goproxy.RejectConnect, host - } - proxy.OnRequest().HandleConnect(proxyHandler) - - return &git.AuthOptions{ - Transport: git.HTTPS, - Username: username, - Password: password, - CAFile: exampleCA, - TransportOptionsURL: "https://proxy-test", - }, func() { - os.RemoveAll(gitServer.Root()) - gitServer.StopHTTP() - } - }, - shortTimeout: false, - wantUsedProxy: true, - wantError: false, - }, - { - name: "libgit2_HTTP_PROXY", - gitImpl: libgit2.Implementation, - url: "http://example.com/bar/test-reponame", - branch: "main", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - // Create the git server. - gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - - err = gitServer.StartHTTP() - g.Expect(err).ToNot(HaveOccurred()) - - // Initialize a git repo. 
- repoPath := "bar/test-reponame" - err = gitServer.InitRepo("../testdata/repo1", "main", repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - g.Expect(err).ToNot(HaveOccurred()) - - // The request is being forwarded to the local test git server in this handler. - // The certificate used here is valid for both example.com and localhost. - var proxyHandler goproxy.FuncReqHandler = func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) { - userAgent := req.Header.Get("User-Agent") - if strings.Contains(req.Host, "example.com") && strings.Contains(userAgent, "git") { - atomic.AddInt32(proxiedRequests, 1) - req.Host = u.Host - req.URL.Host = req.Host - return req, nil - } - // Reject if it isnt our request. - return req, goproxy.NewResponse(req, goproxy.ContentTypeText, http.StatusForbidden, "") - } - proxy.OnRequest().Do(proxyHandler) - - return &git.AuthOptions{ - Transport: git.HTTP, - TransportOptionsURL: "http://proxy-test", - }, func() { - os.RemoveAll(gitServer.Root()) - gitServer.StopHTTP() - } - }, - shortTimeout: false, - wantUsedProxy: true, - wantError: false, - }, - { - name: "gogit_HTTPS_PROXY", - gitImpl: gogit.Implementation, - url: "https://github.com/git-fixtures/basic", - branch: "master", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - var proxyHandler goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) { - // We don't check for user agent as this handler is only going to process CONNECT requests, and because Go's net/http - // is the one making such a request on behalf of go-git, adding a check for the go net/http user agent (Go-http-client) - // would only allow false positives from any request originating from Go's net/http. 
- if strings.Contains(host, "github.com") { - atomic.AddInt32(proxiedRequests, 1) - return goproxy.OkConnect, host - } - // Reject if it isnt our request. - return goproxy.RejectConnect, host - } - proxy.OnRequest().HandleConnect(proxyHandler) - - // go-git does not allow to use an HTTPS proxy and a custom root CA at the same time. - // See https://github.com/fluxcd/source-controller/pull/524#issuecomment-1006673163. - return nil, func() {} - }, - shortTimeout: false, - wantUsedProxy: true, - wantError: false, - }, - { - name: "gogit_NO_PROXY", - gitImpl: gogit.Implementation, - url: "https://192.0.2.1/bar/test-reponame", - branch: "main", - setupGitProxy: func(g *WithT, proxy *goproxy.ProxyHttpServer, proxiedRequests *int32) (*git.AuthOptions, cleanupFunc) { - var proxyHandler goproxy.FuncHttpsHandler = func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) { - // We shouldn't hit the proxy so we just want to check for any interaction, then reject. - atomic.AddInt32(proxiedRequests, 1) - return goproxy.RejectConnect, host - } - proxy.OnRequest().HandleConnect(proxyHandler) - - return nil, func() {} - }, - shortTimeout: true, - wantUsedProxy: false, - wantError: true, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - // Run a proxy server. - proxy := goproxy.NewProxyHttpServer() - proxy.Verbose = true - - proxiedRequests := int32(0) - authOpts, cleanup := tt.setupGitProxy(g, proxy, &proxiedRequests) - defer cleanup() - - proxyServer := http.Server{ - Addr: proxyAddr, - Handler: proxy, - } - l, err := net.Listen("tcp", proxyServer.Addr) - g.Expect(err).ToNot(HaveOccurred()) - go proxyServer.Serve(l) - defer proxyServer.Close() - - // Set the proxy env vars for both HTTP and HTTPS because go-git caches them. 
- os.Setenv("HTTPS_PROXY", fmt.Sprintf("http://smth:else@%s", proxyAddr)) - defer os.Unsetenv("HTTPS_PROXY") - - os.Setenv("HTTP_PROXY", fmt.Sprintf("http://smth:else@%s", proxyAddr)) - defer os.Unsetenv("HTTP_PROXY") - - os.Setenv("NO_PROXY", "*.0.2.1") - defer os.Unsetenv("NO_PROXY") - - // Checkout the repo. - checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(context.TODO(), tt.gitImpl, git.CheckoutOptions{ - Branch: tt.branch, - }) - g.Expect(err).ToNot(HaveOccurred()) - - tmpDir := t.TempDir() - - // for the NO_PROXY test we dont want to wait the 30s for it to timeout/fail, so shorten the timeout - checkoutCtx := context.TODO() - if tt.shortTimeout { - var cancel context.CancelFunc - checkoutCtx, cancel = context.WithTimeout(context.TODO(), 1*time.Second) - defer cancel() - } - - _, err = checkoutStrategy.Checkout(checkoutCtx, tmpDir, tt.url, authOpts) - if tt.wantError { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - g.Expect(atomic.LoadInt32(&proxiedRequests) > 0).To(Equal(tt.wantUsedProxy)) - - }) - } -} diff --git a/pkg/git/strategy/strategy.go b/pkg/git/strategy/strategy.go deleted file mode 100644 index 46d4e58ae..000000000 --- a/pkg/git/strategy/strategy.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package strategy - -import ( - "context" - "fmt" - - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/gogit" - "github.com/fluxcd/source-controller/pkg/git/libgit2" -) - -// CheckoutStrategyForImplementation returns the CheckoutStrategy for the given -// git.Implementation and git.CheckoutOptions. -func CheckoutStrategyForImplementation(ctx context.Context, impl git.Implementation, opts git.CheckoutOptions) (git.CheckoutStrategy, error) { - switch impl { - case gogit.Implementation: - return gogit.CheckoutStrategyForOptions(ctx, opts), nil - case libgit2.Implementation: - return libgit2.CheckoutStrategyForOptions(ctx, opts), nil - default: - return nil, fmt.Errorf("unsupported Git implementation '%s'", impl) - } -} diff --git a/pkg/git/strategy/strategy_test.go b/pkg/git/strategy/strategy_test.go deleted file mode 100644 index 2aee3a229..000000000 --- a/pkg/git/strategy/strategy_test.go +++ /dev/null @@ -1,513 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package strategy - -import ( - "context" - "errors" - "fmt" - "math/rand" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/ssh" - extgogit "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - . "github.com/onsi/gomega" - - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/gogit" - "github.com/fluxcd/source-controller/pkg/git/libgit2" - "github.com/fluxcd/source-controller/pkg/git/libgit2/managed" -) - -func TestMain(m *testing.M) { - err := managed.InitManagedTransport() - if err != nil { - panic(fmt.Sprintf("failed to initialize libgit2 managed transport: %s", err)) - } - code := m.Run() - os.Exit(code) -} - -func TestCheckoutStrategyForImplementation_Auth(t *testing.T) { - gitImpls := []git.Implementation{gogit.Implementation, libgit2.Implementation} - - type testCase struct { - name string - transport git.TransportType - repoURLFunc func(g *WithT, srv *gittestserver.GitServer, repoPath string) string - authOptsFunc func(g *WithT, u *url.URL, user string, pswd string, ca []byte) *git.AuthOptions - wantFunc func(g *WithT, cs git.CheckoutStrategy, dir string, repoURL string, authOpts *git.AuthOptions) - } - - cases := []testCase{ - { - name: "HTTP clone", - transport: git.HTTP, - repoURLFunc: func(g *WithT, srv *gittestserver.GitServer, repoPath string) string { - return srv.HTTPAddressWithCredentials() + "/" + repoPath - }, - authOptsFunc: func(g *WithT, u *url.URL, user string, pswd string, ca []byte) *git.AuthOptions { - return &git.AuthOptions{ - Transport: git.HTTP, - Username: user, - Password: pswd, - TransportOptionsURL: 
getTransportOptionsURL(git.HTTP), - } - }, - wantFunc: func(g *WithT, cs git.CheckoutStrategy, dir string, repoURL string, authOpts *git.AuthOptions) { - _, err := cs.Checkout(context.TODO(), dir, repoURL, authOpts) - g.Expect(err).ToNot(HaveOccurred()) - }, - }, - { - name: "HTTPS clone", - transport: git.HTTPS, - repoURLFunc: func(g *WithT, srv *gittestserver.GitServer, repoPath string) string { - return srv.HTTPAddress() + "/" + repoPath - }, - authOptsFunc: func(g *WithT, u *url.URL, user, pswd string, ca []byte) *git.AuthOptions { - return &git.AuthOptions{ - Transport: git.HTTPS, - Username: user, - Password: pswd, - CAFile: ca, - TransportOptionsURL: getTransportOptionsURL(git.HTTPS), - } - }, - wantFunc: func(g *WithT, cs git.CheckoutStrategy, dir, repoURL string, authOpts *git.AuthOptions) { - _, err := cs.Checkout(context.TODO(), dir, repoURL, authOpts) - g.Expect(err).ToNot(HaveOccurred()) - }, - }, - { - name: "SSH clone", - transport: git.SSH, - repoURLFunc: func(g *WithT, srv *gittestserver.GitServer, repoPath string) string { - return getSSHRepoURL(srv.SSHAddress(), repoPath) - }, - authOptsFunc: func(g *WithT, u *url.URL, user, pswd string, ca []byte) *git.AuthOptions { - knownhosts, err := ssh.ScanHostKey(u.Host, 5*time.Second, git.HostKeyAlgos, false) - g.Expect(err).ToNot(HaveOccurred()) - - keygen := ssh.NewRSAGenerator(2048) - pair, err := keygen.Generate() - g.Expect(err).ToNot(HaveOccurred()) - - return &git.AuthOptions{ - Host: u.Host, // Without this libgit2 returns error "user cancelled hostkey check". - Transport: git.SSH, - Username: "git", // Without this libgit2 returns error "username does not match previous request". 
- Identity: pair.PrivateKey, - KnownHosts: knownhosts, - TransportOptionsURL: getTransportOptionsURL(git.SSH), - } - }, - wantFunc: func(g *WithT, cs git.CheckoutStrategy, dir, repoURL string, authOpts *git.AuthOptions) { - _, err := cs.Checkout(context.TODO(), dir, repoURL, authOpts) - g.Expect(err).ToNot(HaveOccurred()) - }, - }, - } - - testFunc := func(tt testCase, impl git.Implementation) func(t *testing.T) { - return func(t *testing.T) { - g := NewWithT(t) - - var examplePublicKey, examplePrivateKey, exampleCA []byte - - gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(gitServer.Root()) - - username := "test-user" - password := "test-password" - gitServer.Auth(username, password) - gitServer.KeyDir(gitServer.Root()) - - // Start the HTTP/HTTPS server. - if tt.transport == git.HTTPS { - var err error - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") - g.Expect(err).ToNot(HaveOccurred()) - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") - g.Expect(err).ToNot(HaveOccurred()) - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - g.Expect(err).ToNot(HaveOccurred()) - err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - g.Expect(err).ToNot(HaveOccurred()) - } else { - g.Expect(gitServer.StartHTTP()).ToNot(HaveOccurred()) - } - - defer gitServer.StopHTTP() - - // Start the SSH server. - if tt.transport == git.SSH { - g.Expect(gitServer.ListenSSH()).ToNot(HaveOccurred()) - go func() { - gitServer.StartSSH() - }() - defer func() { - g.Expect(gitServer.StopSSH()).To(Succeed()) - }() - } - - // Initialize a git repo. 
- branch := "main" - repoPath := "bar/test-reponame" - err = gitServer.InitRepo("testdata/repo1", branch, repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - repoURL := tt.repoURLFunc(g, gitServer, repoPath) - u, err := url.Parse(repoURL) - g.Expect(err).ToNot(HaveOccurred()) - authOpts := tt.authOptsFunc(g, u, username, password, exampleCA) - - // Get the checkout strategy. - checkoutOpts := git.CheckoutOptions{ - Branch: branch, - } - checkoutStrategy, err := CheckoutStrategyForImplementation(context.TODO(), impl, checkoutOpts) - g.Expect(err).ToNot(HaveOccurred()) - - tmpDir := t.TempDir() - - tt.wantFunc(g, checkoutStrategy, tmpDir, repoURL, authOpts) - } - } - - // Run the test cases against the git implementations. - for _, gitImpl := range gitImpls { - for _, tt := range cases { - t.Run(fmt.Sprintf("%s_%s", gitImpl, tt.name), testFunc(tt, gitImpl)) - } - } -} - -func getSSHRepoURL(sshAddress, repoPath string) string { - // This is expected to use 127.0.0.1, but host key - // checking usually wants a hostname, so use - // "localhost". - sshURL := strings.Replace(sshAddress, "127.0.0.1", "localhost", 1) - return sshURL + "/" + repoPath -} - -func TestCheckoutStrategyForImplementation_SemVerCheckout(t *testing.T) { - g := NewWithT(t) - - gitImpls := []git.Implementation{gogit.Implementation, libgit2.Implementation} - - // Setup git server and repo. 
- gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(gitServer.Root()) - username := "test-user" - password := "test-password" - gitServer.Auth(username, password) - gitServer.KeyDir(gitServer.Root()) - g.Expect(gitServer.StartHTTP()).ToNot(HaveOccurred()) - defer gitServer.StopHTTP() - - repoPath := "bar/test-reponame" - err = gitServer.InitRepo("testdata/repo1", "main", repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - repoURL := gitServer.HTTPAddressWithCredentials() + "/" + repoPath - - authOpts := &git.AuthOptions{ - Transport: git.HTTP, - Username: username, - Password: password, - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - - // Create test tags in the repo. - now := time.Now() - tags := []struct { - tag string - annotated bool - commitTime time.Time - tagTime time.Time - }{ - { - tag: "v0.0.1", - annotated: false, - commitTime: now, - }, - { - tag: "v0.1.0+build-1", - annotated: true, - commitTime: now.Add(10 * time.Minute), - tagTime: now.Add(2 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "v0.1.0+build-2", - annotated: false, - commitTime: now.Add(30 * time.Minute), - }, - { - tag: "v0.1.0+build-3", - annotated: true, - commitTime: now.Add(1 * time.Hour), - tagTime: now.Add(1 * time.Hour), // This should be ignored during TS comparisons - }, - { - tag: "0.2.0", - annotated: true, - commitTime: now, - tagTime: now, - }, - } - - // Clone the repo locally. - cloneDir := t.TempDir() - repo, err := extgogit.PlainClone(cloneDir, false, &extgogit.CloneOptions{ - URL: repoURL, - }) - g.Expect(err).ToNot(HaveOccurred()) - - // Create commits and tags. - // Keep a record of all the tags and commit refs. 
- refs := make(map[string]string, len(tags)) - for _, tt := range tags { - ref, err := commitFile(repo, "tag", tt.tag, tt.commitTime) - g.Expect(err).ToNot(HaveOccurred()) - _, err = tag(repo, ref, tt.annotated, tt.tag, tt.tagTime) - g.Expect(err).ToNot(HaveOccurred()) - refs[tt.tag] = ref.String() - } - - // Push everything. - err = repo.Push(&extgogit.PushOptions{ - RefSpecs: []config.RefSpec{"refs/*:refs/*"}, - }) - g.Expect(err).ToNot(HaveOccurred()) - - // Test cases. - type testCase struct { - name string - constraint string - expectErr error - expectTag string - } - tests := []testCase{ - { - name: "Orders by SemVer", - constraint: ">0.1.0", - expectTag: "0.2.0", - }, - { - name: "Orders by SemVer and timestamp", - constraint: "<0.2.0", - expectTag: "v0.1.0+build-3", - }, - { - name: "Errors without match", - constraint: ">=1.0.0", - expectErr: errors.New("no match found for semver: >=1.0.0"), - }, - } - testFunc := func(tt testCase, impl git.Implementation) func(t *testing.T) { - return func(t *testing.T) { - g := NewWithT(t) - - // Get the checkout strategy. - checkoutOpts := git.CheckoutOptions{ - SemVer: tt.constraint, - } - checkoutStrategy, err := CheckoutStrategyForImplementation(context.TODO(), impl, checkoutOpts) - g.Expect(err).ToNot(HaveOccurred()) - - // Checkout and verify. - tmpDir := t.TempDir() - - cc, err := checkoutStrategy.Checkout(context.TODO(), tmpDir, repoURL, authOpts) - if tt.expectErr != nil { - g.Expect(err).To(Equal(tt.expectErr)) - g.Expect(cc).To(BeNil()) - return - } - - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cc.String()).To(Equal(tt.expectTag + "/" + refs[tt.expectTag])) - g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile()) - g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.expectTag)) - } - } - - // Run the test cases against the git implementations. 
- for _, gitImpl := range gitImpls { - for _, tt := range tests { - t.Run(fmt.Sprintf("%s_%s", gitImpl, tt.name), testFunc(tt, gitImpl)) - } - } -} - -func TestCheckoutStrategyForImplementation_WithCtxTimeout(t *testing.T) { - gitImpls := []git.Implementation{gogit.Implementation, libgit2.Implementation} - - type testCase struct { - name string - timeout time.Duration - wantErr bool - } - - cases := []testCase{ - { - name: "fails with short timeout", - timeout: 100 * time.Millisecond, - wantErr: true, - }, - { - name: "succeeds with sufficient timeout", - timeout: 5 * time.Second, - wantErr: false, - }, - } - - // Keeping it low to keep the test run time low. - serverDelay := 500 * time.Millisecond - - testFunc := func(tt testCase, impl git.Implementation) func(t *testing.T) { - return func(*testing.T) { - g := NewWithT(t) - - gitServer, err := gittestserver.NewTempGitServer() - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(gitServer.Root()) - username := "test-user" - password := "test-password" - gitServer.Auth(username, password) - gitServer.KeyDir(gitServer.Root()) - - middleware := func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(serverDelay) - next.ServeHTTP(w, r) - }) - } - gitServer.AddHTTPMiddlewares(middleware) - - g.Expect(gitServer.StartHTTP()).ToNot(HaveOccurred()) - defer gitServer.StopHTTP() - - branch := "main" - repoPath := "bar/test-reponame" - err = gitServer.InitRepo("testdata/repo1", branch, repoPath) - g.Expect(err).ToNot(HaveOccurred()) - - repoURL := gitServer.HTTPAddressWithCredentials() + "/" + repoPath - - authOpts := &git.AuthOptions{ - Transport: git.HTTP, - Username: username, - Password: password, - TransportOptionsURL: getTransportOptionsURL(git.HTTP), - } - - checkoutOpts := git.CheckoutOptions{ - Branch: branch, - } - checkoutStrategy, err := CheckoutStrategyForImplementation(context.TODO(), impl, checkoutOpts) - 
g.Expect(err).ToNot(HaveOccurred()) - - tmpDir := t.TempDir() - - checkoutCtx, cancel := context.WithTimeout(context.TODO(), tt.timeout) - defer cancel() - - _, gotErr := checkoutStrategy.Checkout(checkoutCtx, tmpDir, repoURL, authOpts) - if tt.wantErr { - g.Expect(gotErr).To(HaveOccurred()) - } else { - g.Expect(gotErr).ToNot(HaveOccurred()) - } - } - } - - // Run the test cases against the git implementations. - for _, gitImpl := range gitImpls { - for _, tt := range cases { - t.Run(fmt.Sprintf("%s_%s", gitImpl, tt.name), testFunc(tt, gitImpl)) - } - } -} - -func commitFile(repo *extgogit.Repository, path, content string, time time.Time) (plumbing.Hash, error) { - wt, err := repo.Worktree() - if err != nil { - return plumbing.Hash{}, err - } - f, err := wt.Filesystem.Create(path) - if err != nil { - return plumbing.Hash{}, err - } - if _, err := f.Write([]byte(content)); err != nil { - if ferr := f.Close(); ferr != nil { - return plumbing.Hash{}, ferr - } - return plumbing.Hash{}, err - } - if err := f.Close(); err != nil { - return plumbing.Hash{}, err - } - if _, err := wt.Add(path); err != nil { - return plumbing.Hash{}, err - } - return wt.Commit("Adding: "+path, &extgogit.CommitOptions{ - Author: mockSignature(time), - Committer: mockSignature(time), - }) -} - -func tag(repo *extgogit.Repository, commit plumbing.Hash, annotated bool, tag string, time time.Time) (*plumbing.Reference, error) { - var opts *extgogit.CreateTagOptions - if annotated { - opts = &extgogit.CreateTagOptions{ - Tagger: mockSignature(time), - Message: "Annotated tag for: " + tag, - } - } - return repo.CreateTag(tag, commit, opts) -} - -func mockSignature(time time.Time) *object.Signature { - return &object.Signature{ - Name: "Jane Doe", - Email: "jane@example.com", - When: time, - } -} - -func getTransportOptionsURL(transport git.TransportType) string { - letterRunes := []rune("abcdefghijklmnopqrstuvwxyz1234567890") - b := make([]rune, 10) - for i := range b { - b[i] = 
letterRunes[rand.Intn(len(letterRunes))] - } - return string(transport) + "://" + string(b) -} diff --git a/pkg/git/strategy/testdata/certs/ca-config.json b/pkg/git/strategy/testdata/certs/ca-config.json deleted file mode 100644 index 91c0644c6..000000000 --- a/pkg/git/strategy/testdata/certs/ca-config.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "signing": { - "default": { - "expiry": "87600h" - }, - "profiles": { - "web-servers": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } - } -} diff --git a/pkg/git/strategy/testdata/certs/ca-csr.json b/pkg/git/strategy/testdata/certs/ca-csr.json deleted file mode 100644 index 941277bb1..000000000 --- a/pkg/git/strategy/testdata/certs/ca-csr.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "CN": "example.com CA", - "hosts": [ - "127.0.0.1", - "localhost", - "example.com", - "www.example.com" - ] -} diff --git a/pkg/git/strategy/testdata/certs/ca-key.pem b/pkg/git/strategy/testdata/certs/ca-key.pem deleted file mode 100644 index b69de5ab5..000000000 --- a/pkg/git/strategy/testdata/certs/ca-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49 -AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H -dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA== ------END EC PRIVATE KEY----- diff --git a/pkg/git/strategy/testdata/certs/ca.csr b/pkg/git/strategy/testdata/certs/ca.csr deleted file mode 100644 index baa8aeb26..000000000 --- a/pkg/git/strategy/testdata/certs/ca.csr +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x -PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt -cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU 
-EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz -b34Wow== ------END CERTIFICATE REQUEST----- diff --git a/pkg/git/strategy/testdata/certs/ca.pem b/pkg/git/strategy/testdata/certs/ca.pem deleted file mode 100644 index 080bd24e6..000000000 --- a/pkg/git/strategy/testdata/certs/ca.pem +++ /dev/null @@ -1,11 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw -NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE -AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ -4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O -1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb -OD8EjjCMY69RMO0= ------END CERTIFICATE----- diff --git a/pkg/git/strategy/testdata/certs/server-key.pem b/pkg/git/strategy/testdata/certs/server-key.pem deleted file mode 100644 index 5054ff39f..000000000 --- a/pkg/git/strategy/testdata/certs/server-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49 -AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3 -fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg== ------END EC PRIVATE KEY----- diff --git a/pkg/git/strategy/testdata/certs/server.csr b/pkg/git/strategy/testdata/certs/server.csr deleted file mode 100644 index 5caf7b39c..000000000 --- a/pkg/git/strategy/testdata/certs/server.csr +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6 -MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl 
-LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB -Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV ------END CERTIFICATE REQUEST----- diff --git a/pkg/git/strategy/testdata/certs/server.pem b/pkg/git/strategy/testdata/certs/server.pem deleted file mode 100644 index 11c655a0b..000000000 --- a/pkg/git/strategy/testdata/certs/server.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw -NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD -AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB -5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb -bdNmUCzAvVuCAKuMjg2OPrE= ------END CERTIFICATE----- diff --git a/pkg/git/strategy/testdata/repo1/foo.txt b/pkg/git/strategy/testdata/repo1/foo.txt deleted file mode 100644 index 16b14f5da..000000000 --- a/pkg/git/strategy/testdata/repo1/foo.txt +++ /dev/null @@ -1 +0,0 @@ -test file diff --git a/pkg/git/testdata/git/repo/foo.txt b/pkg/git/testdata/git/repo/foo.txt deleted file mode 100644 index 16b14f5da..000000000 --- a/pkg/git/testdata/git/repo/foo.txt +++ /dev/null @@ -1 +0,0 @@ -test file diff --git a/pkg/minio/minio.go b/pkg/minio/minio.go deleted file mode 100644 index f1930dbd5..000000000 --- a/pkg/minio/minio.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package minio - -import ( - "context" - "errors" - "fmt" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - corev1 "k8s.io/api/core/v1" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" -) - -// MinioClient is a minimal Minio client for fetching files from S3 compatible -// storage APIs. -type MinioClient struct { - *minio.Client -} - -// NewClient creates a new Minio storage client. -func NewClient(bucket *sourcev1.Bucket, secret *corev1.Secret) (*MinioClient, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, - BucketLookup: minio.BucketLookupPath, - } - - if secret != nil { - var accessKey, secretKey string - if k, ok := secret.Data["accesskey"]; ok { - accessKey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretKey = string(k) - } - if accessKey != "" && secretKey != "" { - opt.Creds = credentials.NewStaticV4(accessKey, secretKey, "") - } - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") - } - - client, err := minio.New(bucket.Spec.Endpoint, &opt) - if err != nil { - return nil, err - } - return &MinioClient{Client: client}, nil -} - -// ValidateSecret validates the credential secret. The provided Secret may -// be nil. 
-func ValidateSecret(secret *corev1.Secret) error { - if secret == nil { - return nil - } - err := fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - if _, ok := secret.Data["accesskey"]; !ok { - return err - } - if _, ok := secret.Data["secretkey"]; !ok { - return err - } - return nil -} - -// FGetObject gets the object from the provided object storage bucket, and -// writes it to targetPath. -// It returns the etag of the successfully fetched file, or any error. -func (c *MinioClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { - stat, err := c.Client.StatObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - return "", err - } - opts := minio.GetObjectOptions{} - if err = opts.SetMatchETag(stat.ETag); err != nil { - return "", err - } - if err = c.Client.FGetObject(ctx, bucketName, objectName, localPath, opts); err != nil { - return "", err - } - return stat.ETag, nil -} - -// VisitObjects iterates over the items in the provided object storage -// bucket, calling visit for every item. -// If the underlying client or the visit callback returns an error, -// it returns early. -func (c *MinioClient) VisitObjects(ctx context.Context, bucketName string, visit func(key, etag string) error) error { - for object := range c.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*c.Client.EndpointURL()), - }) { - if object.Err != nil { - err := fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, object.Err) - return err - } - - if err := visit(object.Key, object.ETag); err != nil { - return err - } - } - return nil -} - -// ObjectIsNotFound checks if the error provided is a minio.ErrResponse -// with "NoSuchKey" code. 
-func (c *MinioClient) ObjectIsNotFound(err error) bool { - if resp := new(minio.ErrorResponse); errors.As(err, resp) { - return resp.Code == "NoSuchKey" - } - return false -} - -// Close closes the Minio Client and logs any useful errors. -func (c *MinioClient) Close(_ context.Context) { - // Minio client does not provide a close method -} diff --git a/pkg/minio/minio_test.go b/pkg/minio/minio_test.go deleted file mode 100644 index 4b8798cc0..000000000 --- a/pkg/minio/minio_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package minio - -import ( - "context" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/fluxcd/pkg/apis/meta" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" - "github.com/fluxcd/source-controller/pkg/sourceignore" - - "github.com/google/uuid" - miniov7 "github.com/minio/minio-go/v7" - "gotest.tools/assert" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - objectName string = "test.yaml" - objectEtag string = "2020beab5f1711919157756379622d1d" -) - -var ( - minioClient *MinioClient - bucketName = "test-bucket-minio" + uuid.New().String() - secret = corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-secret", - Namespace: "default", - }, - Data: map[string][]byte{ - "accesskey": []byte("Q3AM3UQ867SPQQA43P2F"), - "secretkey": []byte("zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"), - }, - Type: "Opaque", - } - emptySecret = corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-secret", - Namespace: "default", - }, - Data: map[string][]byte{}, - Type: "Opaque", - } - bucket = sourcev1.Bucket{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-test-bucket", - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - BucketName: bucketName, - Endpoint: "play.min.io", - Provider: "generic", - Insecure: true, - SecretRef: &meta.LocalObjectReference{ - Name: secret.Name, - }, - }, - } - bucketAwsProvider = sourcev1.Bucket{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-test-bucket", - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - BucketName: bucketName, - Endpoint: "play.min.io", - Provider: "aws", - Insecure: true, - }, - } -) - -func TestMain(m *testing.M) { - var err error - ctx := context.Background() - minioClient, err = NewClient(bucket.DeepCopy(), secret.DeepCopy()) - if err != nil { - log.Fatal(err) - } - createBucket(ctx) - addObjectToBucket(ctx) - run := m.Run() - removeObjectFromBucket(ctx) - 
deleteBucket(ctx) - os.Exit(run) -} - -func TestNewClient(t *testing.T) { - minioClient, err := NewClient(bucket.DeepCopy(), secret.DeepCopy()) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestNewClientEmptySecret(t *testing.T) { - minioClient, err := NewClient(bucket.DeepCopy(), emptySecret.DeepCopy()) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestNewClientAwsProvider(t *testing.T) { - minioClient, err := NewClient(bucketAwsProvider.DeepCopy(), nil) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestBucketExists(t *testing.T) { - ctx := context.Background() - exists, err := minioClient.BucketExists(ctx, bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) -} - -func TestBucketNotExists(t *testing.T) { - ctx := context.Background() - exists, err := minioClient.BucketExists(ctx, "notexistsbucket") - assert.NilError(t, err) - assert.Assert(t, !exists) -} - -func TestFGetObject(t *testing.T) { - ctx := context.Background() - tempDir := t.TempDir() - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - _, err := minioClient.FGetObject(ctx, bucketName, objectName, path) - assert.NilError(t, err) -} - -func TestFGetObjectNotExists(t *testing.T) { - ctx := context.Background() - tempDir := t.TempDir() - badKey := "invalid.txt" - path := filepath.Join(tempDir, badKey) - _, err := minioClient.FGetObject(ctx, bucketName, badKey, path) - assert.Error(t, err, "The specified key does not exist.") - assert.Check(t, minioClient.ObjectIsNotFound(err)) -} - -func TestVisitObjects(t *testing.T) { - keys := []string{} - etags := []string{} - err := minioClient.VisitObjects(context.TODO(), bucketName, func(key, etag string) error { - keys = append(keys, key) - etags = append(etags, etag) - return nil - }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) -} - -func TestVisitObjectsErr(t *testing.T) { 
- ctx := context.Background() - badBucketName := "bad-bucket" - err := minioClient.VisitObjects(ctx, badBucketName, func(string, string) error { - return nil - }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName)) -} - -func TestVisitObjectsCallbackErr(t *testing.T) { - mockErr := fmt.Errorf("mock") - err := minioClient.VisitObjects(context.TODO(), bucketName, func(key, etag string) error { - return mockErr - }) - assert.Error(t, err, mockErr.Error()) -} - -func TestValidateSecret(t *testing.T) { - t.Parallel() - testCases := []struct { - name string - secret *corev1.Secret - error bool - }{ - { - name: "valid secret", - secret: secret.DeepCopy(), - }, - { - name: "nil secret", - secret: nil, - }, - { - name: "invalid secret", - secret: emptySecret.DeepCopy(), - error: true, - }, - } - for _, testCase := range testCases { - tt := testCase - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - err := ValidateSecret(tt.secret) - if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name)) - } else { - assert.NilError(t, err) - } - }) - } -} - -func createBucket(ctx context.Context) { - if err := minioClient.Client.MakeBucket(ctx, bucketName, miniov7.MakeBucketOptions{}); err != nil { - exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - deleteBucket(ctx) - } else { - log.Fatalln(err) - } - } -} - -func deleteBucket(ctx context.Context) { - if err := minioClient.Client.RemoveBucket(ctx, bucketName); err != nil { - log.Println(err) - } -} - -func addObjectToBucket(ctx context.Context) { - fileReader := strings.NewReader(getObjectFile()) - fileSize := fileReader.Size() - _, err := minioClient.Client.PutObject(ctx, bucketName, objectName, fileReader, fileSize, miniov7.PutObjectOptions{ - ContentType: "text/x-yaml", - }) - if err != nil { - log.Println(err) - 
} -} - -func removeObjectFromBucket(ctx context.Context) { - if err := minioClient.Client.RemoveObject(ctx, bucketName, objectName, miniov7.RemoveObjectOptions{ - GovernanceBypass: true, - }); err != nil { - log.Println(err) - } -} - -func getObjectFile() string { - return ` - apiVersion: source.toolkit.fluxcd.io/v1beta2 - kind: Bucket - metadata: - name: podinfo - namespace: default - spec: - interval: 5m - provider: aws - bucketName: podinfo - endpoint: s3.amazonaws.com - region: us-east-1 - timeout: 30s - ` -} diff --git a/pkg/sourceignore/sourceignore.go b/pkg/sourceignore/sourceignore.go deleted file mode 100644 index 38327d38a..000000000 --- a/pkg/sourceignore/sourceignore.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sourceignore - -import ( - "bufio" - "io" - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" -) - -const ( - IgnoreFile = ".sourceignore" - ExcludeVCS = ".git/,.gitignore,.gitmodules,.gitattributes" - ExcludeExt = "*.jpg,*.jpeg,*.gif,*.png,*.wmv,*.flv,*.tar.gz,*.zip" - ExcludeCI = ".github/,.circleci/,.travis.yml,.gitlab-ci.yml,appveyor.yml,.drone.yml,cloudbuild.yaml,codeship-services.yml,codeship-steps.yml" - ExcludeExtra = "**/.goreleaser.yml,**/.sops.yaml,**/.flux.yaml" -) - -// NewMatcher returns a gitignore.Matcher for the given gitignore.Pattern -// slice. It mainly exists to compliment the API. 
-func NewMatcher(ps []gitignore.Pattern) gitignore.Matcher { - return gitignore.NewMatcher(ps) -} - -// NewDefaultMatcher returns a gitignore.Matcher with the DefaultPatterns -// as lowest priority patterns. -func NewDefaultMatcher(ps []gitignore.Pattern, domain []string) gitignore.Matcher { - var defaultPs []gitignore.Pattern - defaultPs = append(defaultPs, VCSPatterns(domain)...) - defaultPs = append(defaultPs, DefaultPatterns(domain)...) - ps = append(defaultPs, ps...) - return gitignore.NewMatcher(ps) -} - -// VCSPatterns returns a gitignore.Pattern slice with ExcludeVCS -// patterns. -func VCSPatterns(domain []string) []gitignore.Pattern { - var ps []gitignore.Pattern - for _, p := range strings.Split(ExcludeVCS, ",") { - ps = append(ps, gitignore.ParsePattern(p, domain)) - } - return ps -} - -// DefaultPatterns returns a gitignore.Pattern slice with the default -// ExcludeExt, ExcludeCI, ExcludeExtra patterns. -func DefaultPatterns(domain []string) []gitignore.Pattern { - all := strings.Join([]string{ExcludeExt, ExcludeCI, ExcludeExtra}, ",") - var ps []gitignore.Pattern - for _, p := range strings.Split(all, ",") { - ps = append(ps, gitignore.ParsePattern(p, domain)) - } - return ps -} - -// ReadPatterns collects ignore patterns from the given reader and -// returns them as a gitignore.Pattern slice. -// If a domain is supplied, this is used as the scope of the read -// patterns. -func ReadPatterns(reader io.Reader, domain []string) []gitignore.Pattern { - var ps []gitignore.Pattern - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - s := scanner.Text() - if !strings.HasPrefix(s, "#") && len(strings.TrimSpace(s)) > 0 { - ps = append(ps, gitignore.ParsePattern(s, domain)) - } - } - return ps -} - -// ReadIgnoreFile attempts to read the file at the given path and -// returns the read patterns. 
-func ReadIgnoreFile(path string, domain []string) ([]gitignore.Pattern, error) { - var ps []gitignore.Pattern - if f, err := os.Open(path); err == nil { - defer f.Close() - ps = append(ps, ReadPatterns(f, domain)...) - } else if !os.IsNotExist(err) { - return nil, err - } - return ps, nil -} - -// LoadIgnorePatterns recursively loads the IgnoreFile patterns found -// in the directory. -func LoadIgnorePatterns(dir string, domain []string) ([]gitignore.Pattern, error) { - ps, err := ReadIgnoreFile(filepath.Join(dir, IgnoreFile), domain) - if err != nil { - return nil, err - } - fis, err := os.ReadDir(dir) - if err != nil { - return nil, err - } - for _, fi := range fis { - if fi.IsDir() && fi.Name() != ".git" { - var subps []gitignore.Pattern - if subps, err = LoadIgnorePatterns(filepath.Join(dir, fi.Name()), append(domain, fi.Name())); err != nil { - return nil, err - } - if len(subps) > 0 { - ps = append(ps, subps...) - } - } - } - return ps, nil -} diff --git a/pkg/sourceignore/sourceignore_test.go b/pkg/sourceignore/sourceignore_test.go deleted file mode 100644 index 5ba78cda8..000000000 --- a/pkg/sourceignore/sourceignore_test.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sourceignore - -import ( - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "gotest.tools/assert" -) - -func TestReadPatterns(t *testing.T) { - tests := []struct { - name string - ignore string - domain []string - matches []string - mismatches []string - }{ - { - name: "simple", - ignore: `ignore-dir/* -!ignore-dir/include -`, - matches: []string{"ignore-dir/file.yaml"}, - mismatches: []string{"file.yaml", "ignore-dir/include"}, - }, - { - name: "with comments", - ignore: `ignore-dir/* -# !ignore-dir/include`, - matches: []string{"ignore-dir/file.yaml", "ignore-dir/include"}, - }, - { - name: "domain scoped", - domain: []string{"domain", "scoped"}, - ignore: "ignore-dir/*", - matches: []string{"domain/scoped/ignore-dir/file.yaml"}, - mismatches: []string{"ignore-dir/file.yaml"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reader := strings.NewReader(tt.ignore) - ps := ReadPatterns(reader, tt.domain) - matcher := NewMatcher(ps) - for _, m := range tt.matches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), true, "expected %s to match", m) - } - for _, m := range tt.mismatches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), false, "expected %s to not match", m) - } - }) - } -} - -func TestReadIgnoreFile(t *testing.T) { - f, err := os.CreateTemp("", IgnoreFile) - if err != nil { - t.Fatal(err) - } - defer os.Remove(f.Name()) - if _, err = f.Write([]byte(`# .sourceignore -ignore-this.txt`)); err != nil { - t.Fatal(err) - } - f.Close() - - tests := []struct { - name string - path string - domain []string - want []gitignore.Pattern - }{ - { - name: IgnoreFile, - path: f.Name(), - want: []gitignore.Pattern{ - gitignore.ParsePattern("ignore-this.txt", nil), - }, - }, - { - name: "with domain", - path: f.Name(), - domain: strings.Split(filepath.Dir(f.Name()), string(filepath.Separator)), - want: 
[]gitignore.Pattern{ - gitignore.ParsePattern("ignore-this.txt", strings.Split(filepath.Dir(f.Name()), string(filepath.Separator))), - }, - }, - { - name: "non existing", - path: "", - want: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ReadIgnoreFile(tt.path, tt.domain) - if err != nil { - t.Error(err) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("ReadIgnoreFile() got = %d, want %#v", got, tt.want) - } - }) - } -} - -func TestVCSPatterns(t *testing.T) { - tests := []struct { - name string - domain []string - patterns []gitignore.Pattern - matches []string - mismatches []string - }{ - { - name: "simple matches", - matches: []string{".git/config", ".gitignore"}, - mismatches: []string{"workload.yaml", "workload.yml", "simple.txt"}, - }, - { - name: "domain scoped matches", - domain: []string{"directory"}, - matches: []string{"directory/.git/config", "directory/.gitignore"}, - mismatches: []string{"other/.git/config"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - matcher := NewDefaultMatcher(tt.patterns, tt.domain) - for _, m := range tt.matches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), true, "expected %s to match", m) - } - for _, m := range tt.mismatches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), false, "expected %s to not match", m) - } - }) - } -} - -func TestDefaultPatterns(t *testing.T) { - tests := []struct { - name string - domain []string - patterns []gitignore.Pattern - matches []string - mismatches []string - }{ - { - name: "simple matches", - matches: []string{"image.jpg", "archive.tar.gz", ".github/workflows/workflow.yaml", "subdir/.flux.yaml", "subdir2/.sops.yaml"}, - mismatches: []string{"workload.yaml", "workload.yml", "simple.txt"}, - }, - { - name: "domain scoped matches", - domain: []string{"directory"}, - matches: []string{"directory/image.jpg", "directory/archive.tar.gz"}, - mismatches: 
[]string{"other/image.jpg", "other/archive.tar.gz"}, - }, - { - name: "patterns", - patterns: []gitignore.Pattern{gitignore.ParsePattern("!*.jpg", nil)}, - mismatches: []string{"image.jpg"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - matcher := NewDefaultMatcher(tt.patterns, tt.domain) - for _, m := range tt.matches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), true, "expected %s to match", m) - } - for _, m := range tt.mismatches { - assert.Equal(t, matcher.Match(strings.Split(m, "/"), false), false, "expected %s to not match", m) - } - }) - } -} - -func TestLoadExcludePatterns(t *testing.T) { - tmpDir := t.TempDir() - files := map[string]string{ - ".sourceignore": "root.txt", - "d/.gitignore": "ignored", - "z/.sourceignore": "last.txt", - "a/b/.sourceignore": "subdir.txt", - } - for n, c := range files { - if err := os.MkdirAll(filepath.Join(tmpDir, filepath.Dir(n)), 0o750); err != nil { - t.Fatal(err) - } - if err := os.WriteFile(filepath.Join(tmpDir, n), []byte(c), 0o640); err != nil { - t.Fatal(err) - } - } - tests := []struct { - name string - dir string - domain []string - want []gitignore.Pattern - }{ - { - name: "traverse loads", - dir: tmpDir, - want: []gitignore.Pattern{ - gitignore.ParsePattern("root.txt", nil), - gitignore.ParsePattern("subdir.txt", []string{"a", "b"}), - gitignore.ParsePattern("last.txt", []string{"z"}), - }, - }, - { - name: "domain", - dir: tmpDir, - domain: strings.Split(tmpDir, string(filepath.Separator)), - want: []gitignore.Pattern{ - gitignore.ParsePattern("root.txt", strings.Split(tmpDir, string(filepath.Separator))), - gitignore.ParsePattern("subdir.txt", append(strings.Split(tmpDir, string(filepath.Separator)), "a", "b")), - gitignore.ParsePattern("last.txt", append(strings.Split(tmpDir, string(filepath.Separator)), "z")), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := LoadIgnorePatterns(tt.dir, tt.domain) - if err != nil { 
- t.Error(err) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("LoadIgnorePatterns() got = %#v, want %#v", got, tt.want) - for _, v := range got { - t.Error(v) - } - } - }) - } -} diff --git a/tests/fuzz/Dockerfile.builder b/tests/fuzz/Dockerfile.builder index 2c010408b..0b45115bb 100644 --- a/tests/fuzz/Dockerfile.builder +++ b/tests/fuzz/Dockerfile.builder @@ -1,6 +1,15 @@ FROM gcr.io/oss-fuzz-base/base-builder-go -COPY ./ $GOPATH/src/github.com/fluxcd/source-controller/ -COPY ./tests/fuzz/oss_fuzz_build.sh $SRC/build.sh +RUN wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz \ + && mkdir temp-go \ + && rm -rf /root/.go/* \ + && tar -C temp-go/ -xzf go1.24.0.linux-amd64.tar.gz \ + && mv temp-go/go/* /root/.go/ + +ENV SRC=$GOPATH/src/github.com/fluxcd/source-controller +ENV FLUX_CI=true + +COPY ./ $SRC +RUN wget https://raw.githubusercontent.com/google/oss-fuzz/master/projects/fluxcd/build.sh -O $SRC/build.sh WORKDIR $SRC diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md new file mode 100644 index 000000000..2ae2cddb6 --- /dev/null +++ b/tests/fuzz/README.md @@ -0,0 +1,82 @@ +# fuzz testing + +Flux is part of Google's [oss fuzz] program which provides continuous fuzzing for +open source projects. + +The long running fuzzing execution is configured in the [oss-fuzz repository]. +Shorter executions are done on a per-PR basis, configured as a [github workflow]. + +### Testing locally + +Build fuzzers: + +```bash +make fuzz-build +``` +All fuzzers will be built into `./build/fuzz/out`. + +Smoke test fuzzers: + +All the fuzzers will be built and executed once, to ensure they are fully functional. 
+ +```bash +make fuzz-smoketest +``` + +Run fuzzer locally: +```bash +./build/fuzz/out/fuzz_conditions_match +``` + +Run fuzzer inside a container: + +```bash + docker run --rm -ti \ + -v "$(pwd)/build/fuzz/out":/out \ + gcr.io/oss-fuzz/fluxcd \ + /out/fuzz_conditions_match +``` + +### Caveats of creating oss-fuzz compatible tests + +#### Segregate fuzz tests + +OSS-Fuzz does not properly support mixed `*_test.go` files, in which there is a combination +of fuzz and non-fuzz tests. To mitigate this problem, ensure your fuzz tests are not in the +same file as other Go tests. As a pattern, call your fuzz test files `*_fuzz_test.go`. + +#### Build tags to avoid conflicts when running Go tests + +Due to the issue above, code duplication will occur when creating fuzz tests that rely on +helper functions that are shared with other tests. To avoid build issues, add a conditional +build tag at the top of the `*_fuzz_test.go` file: +```go +//go:build gofuzz_libfuzzer +// +build gofuzz_libfuzzer +``` + +The build tag above is set at [go-118-fuzz-build]. +At this point in time we can't pass on specific tags from [compile_native_go_fuzzer]. + +### Running oss-fuzz locally + +The `make fuzz-smoketest` is meant to be an easy way to reproduce errors that may occur +upstream. If our checks ever run out of sync with upstream, the upstream tests can be +executed locally with: + +``` +git clone --depth 1 https://github.com/google/oss-fuzz +cd oss-fuzz +python infra/helper.py build_image fluxcd +python infra/helper.py build_fuzzers --sanitizer address --architecture x86_64 fluxcd +python infra/helper.py check_build --sanitizer address --architecture x86_64 fluxcd +``` + +For latest info on testing oss-fuzz locally, refer to the [upstream guide]. 
+ +[oss fuzz]: https://github.com/google/oss-fuzz +[oss-fuzz repository]: https://github.com/google/oss-fuzz/tree/master/projects/fluxcd +[github workflow]: .github/workflows/cifuzz.yaml +[upstream guide]: https://google.github.io/oss-fuzz/getting-started/new-project-guide/#testing-locally +[go-118-fuzz-build]: https://github.com/AdamKorcz/go-118-fuzz-build/blob/b2031950a318d4f2dcf3ec3e128f904d5cf84623/main.go#L40 +[compile_native_go_fuzzer]: https://github.com/google/oss-fuzz/blob/c2d827cb78529fdc757c9b0b4fea0f1238a54814/infra/base-images/base-builder/compile_native_go_fuzzer#L32 diff --git a/tests/fuzz/go.mod b/tests/fuzz/go.mod deleted file mode 100644 index 638f6fdd9..000000000 --- a/tests/fuzz/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/fluxcd/source-controller/tests/fuzz - -go 1.18 - -replace github.com/fluxcd/kustomize-controller/api => ../../api - -replace github.com/fluxcd/kustomize-controller => ../../ diff --git a/tests/fuzz/native_go_run.sh b/tests/fuzz/native_go_run.sh new file mode 100755 index 000000000..a62410273 --- /dev/null +++ b/tests/fuzz/native_go_run.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Flux authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +# This script iterates through all go fuzzing targets, running each one +# through the period of time established by FUZZ_TIME. 
+ +FUZZ_TIME=${FUZZ_TIME:-"5s"} + +# kustomization_fuzzer_test is not fully compatible with Go native fuzz, +# so it is ignored here. +test_files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' . | \ + grep -v "controllers_fuzzer_test.go") + +for file in ${test_files} +do + targets=$(grep -oP 'func \K(Fuzz\w*)' "${file}") + for target_name in ${targets} + do + echo "Running ${file}.${target_name} for ${FUZZ_TIME}." + file_dir=$(dirname "${file}") + + go test -fuzz="^${target_name}\$" -fuzztime "${FUZZ_TIME}" "${file_dir}" + done +done diff --git a/tests/fuzz/oss_fuzz_build.sh b/tests/fuzz/oss_fuzz_build.sh deleted file mode 100755 index 2284cf579..000000000 --- a/tests/fuzz/oss_fuzz_build.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2022 The Flux authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euxo pipefail - -LIBGIT2_TAG="${LIBGIT2_TAG:-v0.2.0}" -GOPATH="${GOPATH:-/root/go}" -GO_SRC="${GOPATH}/src" -PROJECT_PATH="github.com/fluxcd/source-controller" - -pushd "${GO_SRC}/${PROJECT_PATH}" - -export TARGET_DIR="$(/bin/pwd)/build/libgit2/${LIBGIT2_TAG}" - -# For most cases, libgit2 will already be present. -# The exception being at the oss-fuzz integration. -if [ ! 
-d "${TARGET_DIR}" ]; then - curl -o output.tar.gz -LO "https://github.com/fluxcd/golang-with-libgit2/releases/download/${LIBGIT2_TAG}/linux-x86_64-libgit2-only.tar.gz" - - DIR=linux-libgit2-only - NEW_DIR="$(/bin/pwd)/build/libgit2/${LIBGIT2_TAG}" - INSTALLED_DIR="/home/runner/work/golang-with-libgit2/golang-with-libgit2/build/${DIR}" - - mkdir -p ./build/libgit2 - - tar -xf output.tar.gz - rm output.tar.gz - mv "${DIR}" "${LIBGIT2_TAG}" - mv "${LIBGIT2_TAG}/" "./build/libgit2" - - # Update the prefix paths included in the .pc files. - # This will make it easier to update to the location in which they will be used. - find "${NEW_DIR}" -type f -name "*.pc" | xargs -I {} sed -i "s;${INSTALLED_DIR};${NEW_DIR};g" {} -fi - -apt-get update && apt-get install -y pkg-config - -export CGO_ENABLED=1 -export PKG_CONFIG_PATH="${TARGET_DIR}/lib/pkgconfig" -export CGO_LDFLAGS="$(pkg-config --libs --static --cflags libgit2)" -export LIBRARY_PATH="${TARGET_DIR}/lib" -export CGO_CFLAGS="-I${TARGET_DIR}/include" - -go get -d github.com/AdaLogics/go-fuzz-headers - -# The implementation of libgit2 is sensitive to the versions of git2go. -# Leaving it to its own devices, the minimum version of git2go used may not -# be compatible with the currently implemented version. Hence the modifications -# of the existing go.mod. -sed "s;\./api;$(/bin/pwd)/api;g" go.mod > tests/fuzz/go.mod -sed -i 's;module github.com/fluxcd/source-controller;module github.com/fluxcd/source-controller/tests/fuzz;g' tests/fuzz/go.mod -echo "replace github.com/fluxcd/source-controller => $(/bin/pwd)/" >> tests/fuzz/go.mod - -cp go.sum tests/fuzz/go.sum - -pushd "tests/fuzz" - -go mod download - -go get -d github.com/AdaLogics/go-fuzz-headers -go get -d github.com/fluxcd/source-controller - -# Setup files to be embedded into controllers_fuzzer.go's testFiles variable. 
-mkdir -p testdata/crd -cp ../../config/crd/bases/*.yaml testdata/crd/ -cp -r ../../controllers/testdata/certs testdata/ - -go get -d github.com/AdaLogics/go-fuzz-headers - -# Using compile_go_fuzzer to compile fails when statically linking libgit2 dependencies -# via CFLAGS/CXXFLAGS. -function go_compile(){ - function=$1 - fuzzer=$2 - - if [[ $SANITIZER = *coverage* ]]; then - # ref: https://github.com/google/oss-fuzz/blob/master/infra/base-images/base-builder/compile_go_fuzzer - compile_go_fuzzer "${PROJECT_PATH}/tests/fuzz" "${function}" "${fuzzer}" - else - go-fuzz -tags gofuzz -func="${function}" -o "${fuzzer}.a" . - ${CXX} ${CXXFLAGS} ${LIB_FUZZING_ENGINE} -o "${OUT}/${fuzzer}" \ - "${fuzzer}.a" "${TARGET_DIR}/lib/libgit2.a" \ - -fsanitize="${SANITIZER}" - fi -} - -go_compile FuzzRandomGitFiles fuzz_gitrepository_fuzzer -go_compile FuzzGitResourceObject fuzz_git_resource_object - -# By now testdata is embedded in the binaries and no longer needed. -# Remove the dir given that it will be owned by root otherwise. -rm -rf testdata/ - -popd -popd diff --git a/pkg/git/strategy/testdata/certs/Makefile b/tests/fuzz/oss_fuzz_prebuild.sh old mode 100644 new mode 100755 similarity index 55% rename from pkg/git/strategy/testdata/certs/Makefile rename to tests/fuzz/oss_fuzz_prebuild.sh index 5ec8f26c6..18617939e --- a/pkg/git/strategy/testdata/certs/Makefile +++ b/tests/fuzz/oss_fuzz_prebuild.sh @@ -1,4 +1,6 @@ -# Copyright 2021 The Flux authors +#!/usr/bin/env bash + +# Copyright 2022 The Flux authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,19 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-all: server-key.pem +set -euxo pipefail -ca-key.pem: ca-csr.json - cfssl gencert -initca ca-csr.json | cfssljson -bare ca – -ca.pem: ca-key.pem -ca.csr: ca-key.pem +# This file is executed by upstream oss-fuzz for any requirements that +# are specific for building this project. -server-key.pem: server-csr.json ca-config.json ca-key.pem - cfssl gencert \ - -ca=ca.pem \ - -ca-key=ca-key.pem \ - -config=ca-config.json \ - -profile=web-servers \ - server-csr.json | cfssljson -bare server -sever.pem: server-key.pem -server.csr: server-key.pem +# Some tests requires embedded resources. Embedding does not allow +# for traversing into ascending dirs, therefore we copy those contents here: +mkdir -p internal/controller/testdata/crd +cp config/crd/bases/*.yaml internal/controller/testdata/crd/ diff --git a/tests/fuzz/oss_fuzz_run.sh b/tests/fuzz/oss_fuzz_run.sh index 4c87f489b..12912e51a 100755 --- a/tests/fuzz/oss_fuzz_run.sh +++ b/tests/fuzz/oss_fuzz_run.sh @@ -17,4 +17,4 @@ set -euxo pipefail # run each fuzzer once to ensure they are working properly -find /out -type f -name "fuzz*" -exec echo {} -runs=1 \; | bash -e +find /out -type f -iname "fuzz*" -exec echo {} -runs=1 \; | bash -e diff --git a/tests/listener/listener.go b/tests/listener/listener.go new file mode 100644 index 000000000..289b2adf0 --- /dev/null +++ b/tests/listener/listener.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testlistener + +import ( + "net" + "strconv" + "strings" + "testing" + + . "github.com/onsi/gomega" +) + +// New creates a TCP listener on a random port and returns +// the listener, the address and the port of this listener. +// It also registers a cleanup function to close the listener +// when the test ends. +func New(t *testing.T) (net.Listener, string, int) { + t.Helper() + + lis, err := net.Listen("tcp", "localhost:0") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { lis.Close() }) + + addr := lis.Addr().String() + addrParts := strings.Split(addr, ":") + portStr := addrParts[len(addrParts)-1] + port, err := strconv.Atoi(portStr) + g.Expect(err).NotTo(HaveOccurred()) + + return lis, addr, port +} diff --git a/tests/proxy/proxy.go b/tests/proxy/proxy.go new file mode 100644 index 000000000..33fadece4 --- /dev/null +++ b/tests/proxy/proxy.go @@ -0,0 +1,48 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testproxy + +import ( + "net/http" + "testing" + + "github.com/elazarl/goproxy" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +// New creates a new goproxy server on a random port and returns +// the address and the port of this server. It also registers a +// cleanup functions to close the server and the listener when +// the test ends. 
+func New(t *testing.T) (string, int) { + t.Helper() + + lis, addr, port := testlistener.New(t) + + handler := goproxy.NewProxyHttpServer() + handler.Verbose = true + + server := &http.Server{ + Addr: addr, + Handler: handler, + } + go server.Serve(lis) + t.Cleanup(func() { server.Close() }) + + return addr, port +} diff --git a/tests/registry/registry.go b/tests/registry/registry.go new file mode 100644 index 000000000..28b36fd20 --- /dev/null +++ b/tests/registry/registry.go @@ -0,0 +1,124 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testregistry + +import ( + "context" + "fmt" + "io" + "net/url" + "strings" + "testing" + "time" + + "github.com/distribution/distribution/v3/configuration" + "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + "github.com/google/go-containerregistry/pkg/crane" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + . "github.com/onsi/gomega" + "github.com/sirupsen/logrus" + + "github.com/fluxcd/pkg/oci" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +func New(t *testing.T) string { + t.Helper() + + // Get a free random port and release it so the registry can use it. 
+ listener, addr, _ := testlistener.New(t) + err := listener.Close() + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + + config := &configuration.Configuration{} + config.HTTP.Addr = addr + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Log.AccessLog.Disabled = true + config.Log.Level = "error" + logrus.SetOutput(io.Discard) + + r, err := registry.NewRegistry(context.Background(), config) + g.Expect(err).NotTo(HaveOccurred()) + + go r.ListenAndServe() + + return addr +} + +type PodinfoImage struct { + URL string + Tag string + Digest gcrv1.Hash +} + +func CreatePodinfoImageFromTar(tarFilePath, tag, registryURL string, opts ...crane.Option) (*PodinfoImage, error) { + // Create Image + image, err := crane.Load(tarFilePath) + if err != nil { + return nil, err + } + + image = setPodinfoImageAnnotations(image, tag) + + // url.Parse doesn't handle urls with no scheme well e.g localhost: + if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { + registryURL = fmt.Sprintf("http://%s", registryURL) + } + + myURL, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) + + // Image digest + podinfoImageDigest, err := image.Digest() + if err != nil { + return nil, err + } + + // Push image + err = crane.Push(image, repositoryURL, opts...) + if err != nil { + return nil, err + } + + // Tag the image + err = crane.Tag(repositoryURL, tag, opts...) 
+ if err != nil { + return nil, err + } + + return &PodinfoImage{ + URL: "oci://" + repositoryURL, + Tag: tag, + Digest: podinfoImageDigest, + }, nil +} + +func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { + metadata := map[string]string{ + oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: fmt.Sprintf("%s@sha1:b3b00fe35424a45d373bf4c7214178bc36fd7872", tag), + } + return mutate.Annotations(img, metadata).(gcrv1.Image) +}
    FieldDescription +observedIgnore
    + +string + +
    +(Optional) +

    ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

    +
    -provider
    +observedLayerSelector
    -string + +OCILayerSelector +
    -

    Provider specifies the technology used to sign the OCI Artifact.

    +(Optional) +

    ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

    -secretRef
    +ReconcileRequestStatus
    - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
    -

    SecretRef specifies the Kubernetes Secret containing the -trusted public keys.

    +

    +(Members of ReconcileRequestStatus are embedded into this type.) +