diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..567609b12 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +build/ diff --git a/.github/actions/run-tests/Dockerfile b/.github/actions/run-tests/Dockerfile deleted file mode 100644 index c849027d4..000000000 --- a/.github/actions/run-tests/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM golang:1.15-alpine - -# Add any build or testing essential system packages -RUN apk add --no-cache build-base git pkgconf -RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community libgit2-dev~=1.1 - -# Use the GitHub Actions uid:gid combination for proper fs permissions -RUN addgroup -g 116 -S test && adduser -u 1001 -S -g test test - -# Run as test user -USER test - -ENTRYPOINT ["/bin/sh", "-c"] diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml deleted file mode 100644 index 6679361aa..000000000 --- a/.github/actions/run-tests/action.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: 'Run tests' -description: 'Run tests in docker container' -inputs: - command: - description: 'Command to run inside the container' - required: true - default: 'make test' -runs: - using: 'docker' - image: 'Dockerfile' - args: - - ${{ inputs.command }} diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 000000000..f8796c21f --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,40 @@ +version: 2 + +updates: + - package-ecosystem: "gomod" + directory: "/" + labels: ["dependencies"] + schedule: + interval: "monthly" + groups: + go-deps: + patterns: + - "*" + allow: + - dependency-type: "direct" + ignore: + # Cloud SDK are updated manually + - dependency-name: "cloud.google.com/*" + - dependency-name: "github.com/Azure/azure-sdk-for-go/*" + # Kubernetes deps are updated by fluxcd/pkg/runtime + - dependency-name: "k8s.io/*" + - dependency-name: "sigs.k8s.io/*" + - dependency-name: "github.com/go-logr/*" 
+ # OCI deps are updated by fluxcd/pkg/oci + - dependency-name: "github.com/docker/*" + - dependency-name: "github.com/distribution/*" + - dependency-name: "github.com/google/go-containerregistry*" + - dependency-name: "github.com/opencontainers/*" + # Helm deps are updated by fluxcd/pkg/helmtestserver + - dependency-name: "helm.sh/helm/*" + # Flux APIs are updated at release time + - dependency-name: "github.com/fluxcd/source-controller/api" + - package-ecosystem: "github-actions" + directory: "/" + labels: ["area/ci", "dependencies"] + groups: + ci: + patterns: + - "*" + schedule: + interval: "monthly" diff --git a/.github/labels.yaml b/.github/labels.yaml new file mode 100644 index 000000000..2f3e1d525 --- /dev/null +++ b/.github/labels.yaml @@ -0,0 +1,42 @@ +# Configuration file to declaratively configure labels +# Ref: https://github.com/EndBug/label-sync#Config-files + +- name: area/bucket + description: Bucket related issues and pull requests + color: '#00b140' +- name: area/git + description: Git related issues and pull requests + color: '#863faf' +- name: area/helm + description: Helm related issues and pull requests + color: '#1673b6' +- name: area/oci + description: OCI related issues and pull requests + color: '#c739ff' +- name: area/storage + description: Storage related issues and pull requests + color: '#4b0082' +- name: backport:release/v1.0.x + description: To be backported to release/v1.0.x + color: '#ffd700' +- name: backport:release/v1.1.x + description: To be backported to release/v1.1.x + color: '#ffd700' +- name: backport:release/v1.2.x + description: To be backported to release/v1.2.x + color: '#ffd700' +- name: backport:release/v1.3.x + description: To be backported to release/v1.3.x + color: '#ffd700' +- name: backport:release/v1.4.x + description: To be backported to release/v1.4.x + color: '#ffd700' +- name: backport:release/v1.5.x + description: To be 
backported to release/v1.5.x + color: '#ffd700' +- name: backport:release/v1.6.x + description: To be backported to release/v1.6.x + color: '#ffd700' +- name: backport:release/v1.7.x + description: To be backported to release/v1.7.x + color: '#ffd700' diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml new file mode 100644 index 000000000..4081bb128 --- /dev/null +++ b/.github/workflows/backport.yaml @@ -0,0 +1,12 @@ +name: backport +on: + pull_request_target: + types: [closed, labeled] +jobs: + backport: + permissions: + contents: write # for reading and creating branches. + pull-requests: write # for creating pull requests against release branches. + uses: fluxcd/gha-workflows/.github/workflows/backport.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cifuzz.yaml b/.github/workflows/cifuzz.yaml new file mode 100644 index 000000000..16ddaa227 --- /dev/null +++ b/.github/workflows/cifuzz.yaml @@ -0,0 +1,20 @@ +name: fuzz +on: + pull_request: + branches: + - 'main' + - 'release/**' +jobs: + smoketest: + runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. + steps: + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 + with: + go-version: 1.25.x + - name: Smoke test Fuzzers + run: make fuzz-smoketest + env: + SKIP_COSIGN_VERIFICATION: true diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index e7cdce0dd..483e65ad6 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -1,111 +1,35 @@ name: e2e - on: + workflow_dispatch: pull_request: push: branches: - - main - + - 'main' + - 'release/**' jobs: - kind: + kind-linux-amd64: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. 
steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Restore Go cache - uses: actions/cache@v1 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 with: - path: /home/runner/work/_temp/_github_home/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Setup Kubernetes - uses: engineerd/setup-kind@v0.5.0 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Setup Kubebuilder - uses: fluxcd/pkg/actions/kubebuilder@main - - name: Setup Helm - uses: fluxcd/pkg/actions/helm@main - - name: Run tests - uses: ./.github/actions/run-tests + go-version: 1.25.x + - name: Verify + run: make verify + - name: Enable integration tests + # Only run integration tests for main and release branches + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') + run: | + echo 'GO_TAGS=integration' >> $GITHUB_ENV + - name: Run E2E tests env: - GOPATH: /github/home/go - KUBEBUILDER_ASSETS: ${{ github.workspace }}/kubebuilder/bin - - name: Check if working tree is dirty - run: | - if [[ $(git diff --stat) != '' ]]; then - git --no-pager diff - echo 'run make test and commit changes' - exit 1 - fi - - name: Build container image - run: make docker-build IMG=test/source-controller:latest - env: - KUBEBUILDER_ASSETS: ${{ github.workspace }}/kubebuilder/bin - - name: Load test image - run: kind load docker-image test/source-controller:latest - - name: Deploy controller - run: make dev-deploy IMG=test/source-controller:latest - env: - KUBEBUILDER_ASSETS: ${{ github.workspace }}/kubebuilder/bin - - name: Run smoke tests - run: | - kubectl -n source-system apply -f ./config/samples - kubectl -n source-system rollout status deploy/source-controller --timeout=1m - kubectl -n source-system wait gitrepository/gitrepository-sample --for=condition=ready --timeout=1m - kubectl -n source-system wait helmrepository/helmrepository-sample 
--for=condition=ready --timeout=1m - kubectl -n source-system wait helmchart/helmchart-sample --for=condition=ready --timeout=1m - kubectl -n source-system delete -f ./config/samples - - name: Run HelmChart values file tests - run: | - kubectl -n source-system apply -f ./config/testdata/helmchart-valuesfile - kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=5m - kubectl -n source-system wait helmchart/podinfo-git --for=condition=ready --timeout=5m - kubectl -n source-system delete -f ./config/testdata/helmchart-valuesfile - - name: Setup Minio - env: - MINIO_VER: ${{ 'v6.3.1' }} - run: | - kubectl create ns minio - helm repo add minio https://helm.min.io/ - helm upgrade --wait -i minio minio/minio \ - --version $MINIO_VER \ - --namespace minio \ - --set accessKey=myaccesskey \ - --set secretKey=mysecretkey \ - --set resources.requests.memory=128Mi \ - --set persistence.enable=false - kubectl -n minio port-forward svc/minio 9000:9000 &>/dev/null & - sleep 2 - wget -q https://dl.min.io/client/mc/release/linux-amd64/mc - chmod +x mc - ./mc alias set minio http://localhost:9000 myaccesskey mysecretkey --api S3v4 - kubectl -n source-system apply -f ./config/testdata/minio/secret.yaml - - name: Run Bucket tests - run: | - ./mc mb minio/podinfo - ./mc mirror ./config/testdata/minio/manifests/ minio/podinfo - - kubectl -n source-system apply -f ./config/testdata/bucket/source.yaml - kubectl -n source-system wait bucket/podinfo --for=condition=ready --timeout=1m - - name: Run HelmChart from Bucket tests - run: | - ./mc mb minio/charts - ./mc mirror ./controllers/testdata/charts/helmchart/ minio/charts/helmchart - - kubectl -n source-system apply -f ./config/testdata/helmchart-from-bucket/source.yaml - kubectl -n source-system wait bucket/charts --for=condition=ready --timeout=1m - kubectl -n source-system wait helmchart/helmchart-bucket --for=condition=ready --timeout=1m - - name: Logs - run: | - kubectl -n source-system logs 
deploy/source-controller - - name: Debug failure - if: failure() - run: | - kubectl -n source-system get gitrepositories -oyaml - kubectl -n source-system get helmrepositories -oyaml - kubectl -n source-system get helmcharts -oyaml - kubectl -n source-system get all - kubectl -n source-system logs deploy/source-controller - kubectl -n minio get all + SKIP_COSIGN_VERIFICATION: true + CREATE_CLUSTER: false + run: make e2e + - name: Print controller logs + if: always() + continue-on-error: true + run: | + kubectl -n source-system logs -l app=source-controller diff --git a/.github/workflows/fossa.yaml b/.github/workflows/fossa.yaml deleted file mode 100644 index 5019ee580..000000000 --- a/.github/workflows/fossa.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: FOSSA -on: - push: - branches: [main] - pull_request: - branches: [main] - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: "^1.15.x" - - name: Add GOPATH to GITHUB_ENV - run: echo "GOPATH=$(go env GOPATH)" >>"$GITHUB_ENV" - - name: Add GOPATH to GITHUB_PATH - run: echo "$GOPATH/bin" >>"$GITHUB_PATH" - - name: Run FOSSA scan and upload build data - uses: fossa-contrib/fossa-action@v1 - with: - # FOSSA Push-Only API Token - fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de - github-token: ${{ github.token }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..9cc8d6e17 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,66 @@ +name: release +on: + push: + tags: + - 'v*' + workflow_dispatch: + inputs: + tag: + description: 'image tag prefix' + default: 'rc' + required: true +jobs: + release: + permissions: + contents: write # for creating the GitHub release. + id-token: write # for creating OIDC tokens for signing. + packages: write # for pushing and signing container images. 
+ uses: fluxcd/gha-workflows/.github/workflows/controller-release.yaml@v0.4.0 + with: + controller: ${{ github.event.repository.name }} + release-candidate-prefix: ${{ github.event.inputs.tag }} + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + dockerhub-token: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + release-provenance: + needs: [release] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + contents: write # for uploading attestations to GitHub releases. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 + with: + provenance-name: "provenance.intoto.jsonl" + base64-subjects: "${{ needs.release.outputs.release-digests }}" + upload-assets: true + dockerhub-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ${{ needs.release.outputs.image-name }} + digest: ${{ needs.release.outputs.image-digest }} + registry-username: ${{ github.repository_owner == 'fluxcd' && 'fluxcdbot' || github.repository_owner }} + secrets: + registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + ghcr-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. 
+ if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ghcr.io/${{ needs.release.outputs.image-name }} + digest: ${{ needs.release.outputs.image-digest }} + registry-username: fluxcdbot # not necessary for ghcr.io + secrets: + registry-password: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index c72c54b68..000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: release -on: - push: - tags: - - 'v*' - -jobs: - build-push: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Prepare - id: prep - run: | - VERSION=sha-${GITHUB_SHA::8} - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\//} - fi - echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=VERSION::${VERSION} - - name: Setup QEMU - uses: docker/setup-qemu-action@v1 - with: - platforms: all - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - with: - buildkitd-flags: "--debug" - - name: Login to GitHub Container Registry - uses: docker/login-action@v1 - with: - registry: ghcr.io - username: fluxcdbot - password: ${{ secrets.GHCR_TOKEN }} - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: fluxcdbot - password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - - name: Publish multi-arch container image - uses: docker/build-push-action@v2 - with: - push: true - builder: ${{ steps.buildx.outputs.name }} - context: . 
- file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: | - ghcr.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - docker.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - labels: | - org.opencontainers.image.title=${{ github.event.repository.name }} - org.opencontainers.image.description=${{ github.event.repository.description }} - org.opencontainers.image.url=${{ github.event.repository.html_url }} - org.opencontainers.image.revision=${{ github.sha }} - org.opencontainers.image.version=${{ steps.prep.outputs.VERSION }} - org.opencontainers.image.created=${{ steps.prep.outputs.BUILD_DATE }} - - name: Check images - run: | - docker buildx imagetools inspect docker.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - docker buildx imagetools inspect ghcr.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - docker pull docker.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - docker pull ghcr.io/fluxcd/source-controller:${{ steps.prep.outputs.VERSION }} - - name: Generate release asset - if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') - run: | - mkdir -p config/release - cp config/default/* config/release - cd config/release - kustomize edit set image fluxcd/source-controller=fluxcd/source-controller:${{ steps.get_version.outputs.VERSION }} - kustomize build . 
> source-controller.yaml - - name: Create release - if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') - id: create_release - uses: actions/create-release@latest - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ github.ref }} - release_name: ${{ github.ref }} - draft: false - prerelease: true - body: | - [CHANGELOG](https://github.com/fluxcd/source-controller/blob/main/CHANGELOG.md) - - name: Upload artifacts - if: github.event_name == 'push' && contains(github.ref, 'refs/tags/') - id: upload-release-asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./config/release/source-controller.yaml - asset_name: source-controller.yaml - asset_content_type: text/plain diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml new file mode 100644 index 000000000..ea8e992de --- /dev/null +++ b/.github/workflows/scan.yaml @@ -0,0 +1,17 @@ +name: scan +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + schedule: + - cron: '18 10 * * 3' +jobs: + analyze: + permissions: + contents: read # for reading the repository code. + security-events: write # for uploading the CodeQL analysis results. + uses: fluxcd/gha-workflows/.github/workflows/code-scan.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + fossa-token: ${{ secrets.FOSSA_TOKEN }} diff --git a/.github/workflows/sync-labels.yaml b/.github/workflows/sync-labels.yaml new file mode 100644 index 000000000..a4635094d --- /dev/null +++ b/.github/workflows/sync-labels.yaml @@ -0,0 +1,16 @@ +name: sync-labels +on: + workflow_dispatch: + push: + branches: + - main + paths: + - .github/labels.yaml +jobs: + sync-labels: + permissions: + contents: read # for reading the labels file. + issues: write # for creating and updating labels. 
+ uses: fluxcd/gha-workflows/.github/workflows/labels-sync.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..c7a9aa2e8 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,22 @@ +name: test +on: + workflow_dispatch: + pull_request: + push: + branches: + - 'main' + - 'release/**' +jobs: + test-linux-amd64: + runs-on: ubuntu-latest + steps: + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 + with: + go-version: 1.25.x + - name: Run tests + env: + SKIP_COSIGN_VERIFICATION: true + TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} + TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} + run: make test diff --git a/.gitignore b/.gitignore index 8f19ec807..327ff117d 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,8 @@ # Dependency directories (remove the comment below to include it) # vendor/ bin/ +testbin/ config/release/ + +# Exclude temporary build files +build/ diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 000000000..7b61ce0c1 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,57 @@ +project_name: source-controller + +builds: + - skip: true + +release: + extra_files: + - glob: config/release/*.yaml + prerelease: "auto" + header: | + ## Changelog + + [{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md) + footer: | + ## Container images + + - `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}` + - `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}` + + Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`. + + The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC. + To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/). 
+ +changelog: + disable: true + +checksum: + extra_files: + - glob: config/release/*.yaml + +source: + enabled: true + name_template: "{{ .ProjectName }}_{{ .Version }}_source_code" + +sboms: + - id: source + artifacts: source + documents: + - "{{ .ProjectName }}_{{ .Version }}_sbom.spdx.json" + +# signs the checksum file +# all files (including the sboms) are included in the checksum +# https://goreleaser.com/customization/sign +signs: + - cmd: cosign + env: + - COSIGN_EXPERIMENTAL=1 + certificate: "${artifact}.pem" + args: + - sign-blob + - "--yes" + - "--output-certificate=${certificate}" + - "--output-signature=${signature}" + - "${artifact}" + artifacts: checksum + output: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 96f63a703..74cb010a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,2857 @@ All notable changes to this project are documented in this file. +## 1.7.0 + +**Release date:** 2025-09-15 + +This minor release comes with new features, improvements and bug fixes. + +### ExternalArtifact + +A new [ExternalArtifact](https://github.com/fluxcd/source-controller/blob/main/docs/spec/v1/externalartifacts.md) API has been added to the `source.toolkit.fluxcd.io` group. This API enables advanced source composition and decomposition patterns implemented by the [source-watcher](https://github.com/fluxcd/source-watcher) controller. + +### GitRepository + +GitRepository controller now includes fixes for stalling issues and improved error handling. Multi-tenant workload identity support has been added for Azure repositories when the `ObjectLevelWorkloadIdentity` feature gate is enabled. TLS configuration support has been added for GitHub App authentication. + +### Bucket + +Bucket controller now supports multi-tenant workload identity for AWS, Azure and GCP providers when the `ObjectLevelWorkloadIdentity` feature gate is enabled. A default service account flag has been added for lockdown scenarios. 
+ +### General updates + +The controller now supports system certificate pools for improved CA compatibility, and TLS ServerName pinning has been removed from TLS configuration for better flexibility. A `--default-service-account=` flag was introduced for workload identity multi-tenancy lockdown. + +In addition, the Kubernetes dependencies have been updated to v1.34, Helm +has been updated to v3.19 and various other controller dependencies have +been updated to their latest version. The controller is now built with +Go 1.25. + +Fixes: +- Fix GitRepository controller stalling when it shouldn't + [#1865](https://github.com/fluxcd/source-controller/pull/1865) + +Improvements: +- [RFC-0010] Add multi-tenant workload identity support for GCP Bucket + [#1862](https://github.com/fluxcd/source-controller/pull/1862) +- [RFC-0010] Add multi-tenant workload identity support for AWS Bucket + [#1868](https://github.com/fluxcd/source-controller/pull/1868) +- [RFC-0010] Add multi-tenant workload identity support for Azure GitRepository + [#1871](https://github.com/fluxcd/source-controller/pull/1871) +- [RFC-0010] Add default-service-account for lockdown + [#1872](https://github.com/fluxcd/source-controller/pull/1872) +- [RFC-0010] Add multi-tenant workload identity support for Azure Blob Storage + [#1875](https://github.com/fluxcd/source-controller/pull/1875) +- [RFC-0012] Add ExternalArtifact API documentation + [#1881](https://github.com/fluxcd/source-controller/pull/1881) +- [RFC-0012] Refactor controller to use `fluxcd/pkg/artifact` + [#1883](https://github.com/fluxcd/source-controller/pull/1883) +- Migrate OCIRepository controller to runtime/secrets + [#1851](https://github.com/fluxcd/source-controller/pull/1851) +- Migrate Bucket controller to runtime/secrets + [#1852](https://github.com/fluxcd/source-controller/pull/1852) +- Add TLS 
config for GitHub App authentication + [#1860](https://github.com/fluxcd/source-controller/pull/1860) +- Remove ServerName pinning from TLS config + [#1870](https://github.com/fluxcd/source-controller/pull/1870) +- Extract storage operations to a dedicated package + [#1864](https://github.com/fluxcd/source-controller/pull/1864) +- Remove deprecated APIs in group `source.toolkit.fluxcd.io/v1beta1` + [#1861](https://github.com/fluxcd/source-controller/pull/1861) +- Migrate tests from gotest to gomega + [#1876](https://github.com/fluxcd/source-controller/pull/1876) +- Update dependencies + [#1888](https://github.com/fluxcd/source-controller/pull/1888) + [#1880](https://github.com/fluxcd/source-controller/pull/1880) + [#1878](https://github.com/fluxcd/source-controller/pull/1878) + [#1876](https://github.com/fluxcd/source-controller/pull/1876) + [#1874](https://github.com/fluxcd/source-controller/pull/1874) + [#1850](https://github.com/fluxcd/source-controller/pull/1850) + [#1844](https://github.com/fluxcd/source-controller/pull/1844) + +## 1.6.2 + +**Release date:** 2025-06-27 + +This patch release comes with a fix for `rsa-sha2-512` and `rsa-sha2-256` algorithms +not being prioritized for `ssh-rsa` host keys. + +Fixes: +- Fix: Prioritize sha2-512 and sha2-256 for ssh-rsa host keys + [#1839](https://github.com/fluxcd/source-controller/pull/1839) + +## 1.6.1 + +**Release date:** 2025-06-13 + +This patch release comes with a fix for the `knownhosts: key mismatch` +error in the `GitRepository` API when using SSH authentication, and +a fix for authentication with +[public ECR repositories](https://fluxcd.io/flux/integrations/aws/#for-amazon-public-elastic-container-registry) +in the `OCIRepository` API. 
+ +Fix: +- Fix authentication for public ECR + [#1825](https://github.com/fluxcd/source-controller/pull/1825) +- Fix `knownhosts key mismatch` regression bug + [#1829](https://github.com/fluxcd/source-controller/pull/1829) + +## 1.6.0 + +**Release date:** 2025-05-27 + +This minor release promotes the OCIRepository API to GA, and comes with new features, +improvements and bug fixes. + +### OCIRepository + +The `OCIRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +OCIRepository API now supports object-level workload identity by setting +`.spec.provider` to one of `aws`, `azure`, or `gcp`, and setting +`.spec.serviceAccountName` to the name of a service account in the same +namespace that has been configured with appropriate cloud permissions. +For this feature to work, the controller feature gate +`ObjectLevelWorkloadIdentity` must be enabled. See a complete guide +[here](https://fluxcd.io/flux/integrations/). + +OCIRepository API now caches registry credentials for cloud providers +by default. This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### GitRepository + +GitRepository API now supports sparse checkout by setting a list +of directories in the `.spec.sparseCheckout` field. This allows +for optimizing the amount of data fetched from the Git repository. + +GitRepository API now supports mTLS authentication for HTTPS Git repositories +by setting the fields `tls.crt`, `tls.key`, and `ca.crt` in the `.data` field +of the referenced Secret in `.spec.secretRef`. + +GitRepository API now caches credentials for non-`generic` providers by default. 
+This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### General updates + +In addition, the Kubernetes dependencies have been updated to v1.33 and +various other controller dependencies have been updated to their latest +version. The controller is now built with Go 1.24. + +Fixes: +- Downgrade `Masterminds/semver` to v3.3.0 + [#1785](https://github.com/fluxcd/source-controller/pull/1785) + +Improvements: +- Promote OCIRepository API to v1 (GA) + [#1794](https://github.com/fluxcd/source-controller/pull/1794) +- [RFC-0010] Introduce object-level workload identity for container registry APIs and cache credentials + [#1790](https://github.com/fluxcd/source-controller/pull/1790) + [#1802](https://github.com/fluxcd/source-controller/pull/1802) + [#1811](https://github.com/fluxcd/source-controller/pull/1811) +- Implement Sparse Checkout for `GitRepository` + [#1774](https://github.com/fluxcd/source-controller/pull/1774) +- Add Mutual TLS support to `GitRepository` + [#1778](https://github.com/fluxcd/source-controller/pull/1778) +- Introduce token cache for `GitRepository` + [#1745](https://github.com/fluxcd/source-controller/pull/1745) + [#1788](https://github.com/fluxcd/source-controller/pull/1788) + [#1789](https://github.com/fluxcd/source-controller/pull/1789) +- Build controller without CGO + [#1725](https://github.com/fluxcd/source-controller/pull/1725) +- Various dependency updates + [#1812](https://github.com/fluxcd/source-controller/pull/1812) + [#1800](https://github.com/fluxcd/source-controller/pull/1800) + 
[#1810](https://github.com/fluxcd/source-controller/pull/1810) + [#1806](https://github.com/fluxcd/source-controller/pull/1806) + [#1782](https://github.com/fluxcd/source-controller/pull/1782) + [#1783](https://github.com/fluxcd/source-controller/pull/1783) + [#1775](https://github.com/fluxcd/source-controller/pull/1775) + [#1728](https://github.com/fluxcd/source-controller/pull/1728) + [#1722](https://github.com/fluxcd/source-controller/pull/1722) + +## 1.5.0 + +**Release date:** 2025-02-13 + +This minor release comes with various bug fixes and improvements. + +### GitRepository + +The GitRepository API now supports authenticating through GitHub App +for GitHub repositories. See +[docs](https://fluxcd.io/flux/components/source/gitrepositories/#github). + +In addition, the Kubernetes dependencies have been updated to v1.32.1, Helm has +been updated to v3.17.0 and various other controller dependencies have been +updated to their latest version. + +Fixes: +- Remove deprecated object metrics from controllers + [#1686](https://github.com/fluxcd/source-controller/pull/1686) + +Improvements: +- [RFC-007] Implement GitHub app authentication for git repositories. 
+ [#1647](https://github.com/fluxcd/source-controller/pull/1647) +- Various dependency updates + [#1684](https://github.com/fluxcd/source-controller/pull/1684) + [#1689](https://github.com/fluxcd/source-controller/pull/1689) + [#1693](https://github.com/fluxcd/source-controller/pull/1693) + [#1705](https://github.com/fluxcd/source-controller/pull/1705) + [#1708](https://github.com/fluxcd/source-controller/pull/1708) + [#1709](https://github.com/fluxcd/source-controller/pull/1709) + [#1713](https://github.com/fluxcd/source-controller/pull/1713) + [#1716](https://github.com/fluxcd/source-controller/pull/1716) + +## 1.4.1 + +**Release date:** 2024-09-26 + +This patch release comes with a fix to the `GitRepository` API to keep it +backwards compatible by removing the default value for `.spec.provider` field +when not set in the API. The controller will internally consider an empty value +for the provider as the `generic` provider. + +Fix: +- GitRepo: Remove provider default value from API + [#1626](https://github.com/fluxcd/source-controller/pull/1626) + +## 1.4.0 + +**Release date:** 2024-09-25 + +This minor release promotes the Bucket API to GA, and comes with new features, +improvements and bug fixes. + +### Bucket + +The `Bucket` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +Bucket API now supports proxy through the field `.spec.proxySecretRef` and custom TLS client certificate and CA through the field `.spec.certSecretRef`. + +Bucket API now also supports specifying a custom STS configuration through the field `.spec.sts`. This is currently only supported for the providers `generic` and `aws`. When specifying a custom STS configuration one must specify which STS provider to use. 
For the `generic` bucket provider we support the `ldap` STS provider, and for the `aws` bucket provider we support the `aws` STS provider. For the `aws` STS provider, one may use the default main STS endpoint, or the regional STS endpoints, or even an interface endpoint. + +### OCIRepository + +OCIRepository API now supports proxy through the field `.spec.proxySecretRef`. + +**Warning**: Proxy is not supported for cosign keyless verification. + +### GitRepository + +GitRepository API now supports OIDC authentication for Azure DevOps repositories through the field `.spec.provider` using the value `azure`. See the docs for details [here](https://fluxcd.io/flux/components/source/gitrepositories/#provider). + +In addition, the Kubernetes dependencies have been updated to v1.31.1, Helm has +been updated to v3.16.1 and various other controller dependencies have been +updated to their latest version. The controller is now built with Go 1.23. + +Fixes: +- helm: Use the default transport pool to preserve proxy settings + [#1490](https://github.com/fluxcd/source-controller/pull/1490) +- Fix incorrect use of format strings with the conditions package. 
+ [#1529](https://github.com/fluxcd/source-controller/pull/1529) +- Fix HelmChart local dependency resolution for name-based path + [#1539](https://github.com/fluxcd/source-controller/pull/1539) +- Fix Helm index validation for Artifactory + [#1516](https://github.com/fluxcd/source-controller/pull/1516) + +Improvements: +- Promote Bucket API to v1 + [#1592](https://github.com/fluxcd/source-controller/pull/1592) +- Add .spec.certSecretRef to Bucket API + [#1475](https://github.com/fluxcd/source-controller/pull/1475) +- Run ARM64 tests on GitHub runners + [#1512](https://github.com/fluxcd/source-controller/pull/1512) +- Add support for .spec.proxySecretRef for generic provider of Bucket API + [#1500](https://github.com/fluxcd/source-controller/pull/1500) +- Improve invalid proxy error message for Bucket API + [#1550](https://github.com/fluxcd/source-controller/pull/1550) +- Add support for AWS STS endpoint in the Bucket API + [#1552](https://github.com/fluxcd/source-controller/pull/1552) +- Add proxy support for GCS buckets + [#1565](https://github.com/fluxcd/source-controller/pull/1565) +- azure-blob: Fix VisitObjects() in integration test + [#1574](https://github.com/fluxcd/source-controller/pull/1574) +- Add proxy support for Azure buckets + [#1567](https://github.com/fluxcd/source-controller/pull/1567) +- Add proxy support for AWS S3 buckets + [#1568](https://github.com/fluxcd/source-controller/pull/1568) +- Add proxy support for OCIRepository API + [#1536](https://github.com/fluxcd/source-controller/pull/1536) +- Add LDAP provider for Bucket STS API + [#1585](https://github.com/fluxcd/source-controller/pull/1585) +- Introduce Bucket provider constants with the common part as a prefix + 
[#1589](https://github.com/fluxcd/source-controller/pull/1589) +- OCIRepository: Configure proxy for OIDC auth + [#1607](https://github.com/fluxcd/source-controller/pull/1607) +- [RFC-0007] Enable Azure OIDC for Azure DevOps repositories + [#1591](https://github.com/fluxcd/source-controller/pull/1591) +- Build with Go 1.23 + [#1582](https://github.com/fluxcd/source-controller/pull/1582) +- Various dependency updates + [#1507](https://github.com/fluxcd/source-controller/pull/1507) + [#1576](https://github.com/fluxcd/source-controller/pull/1576) + [#1578](https://github.com/fluxcd/source-controller/pull/1578) + [#1579](https://github.com/fluxcd/source-controller/pull/1579) + [#1583](https://github.com/fluxcd/source-controller/pull/1583) + [#1588](https://github.com/fluxcd/source-controller/pull/1588) + [#1603](https://github.com/fluxcd/source-controller/pull/1603) + [#1610](https://github.com/fluxcd/source-controller/pull/1610) + [#1614](https://github.com/fluxcd/source-controller/pull/1614) + [#1618](https://github.com/fluxcd/source-controller/pull/1618) + +## 1.3.0 + +**Release date:** 2024-05-03 + +This minor release promotes the Helm APIs to GA, and comes with new features, +improvements and bug fixes. + +### HelmRepository + +The `HelmRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +For `HelmRepository` of type `oci`, the `.spec.insecure` field allows connecting +over HTTP to an insecure non-TLS container registry. + +To upgrade from `v1beta2`, after deploying the new CRD and controller, +set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that +contain `HelmRepository` definitions. +Bumping the API version in manifests can be done gradually. 
+It is advised not to delay this procedure as the beta versions will be removed after 6 months. + +### HelmChart + +The `HelmChart` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`, with the exception +of the removal of the deprecated field `.spec.valuesFile` which was replaced with `spec.valuesFiles`. + +The `HelmChart` API was extended with support for +[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1/helmcharts.md#notation) +of Helm OCI charts. + +A new optional field `.spec.ignoreMissingValuesFiles` has been added, +which allows the controller to ignore missing values files rather than failing to reconcile the `HelmChart`. + +### OCIRepository + +The `OCIRepository` API was extended with support for +[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#notation) +of OCI artifacts. + +A new optional field `.spec.ref.semverFilter` has been added, +which allows the controller to filter the tags based on regular expressions +before applying the semver range. This allows +[picking the latest release candidate](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#semverfilter-example) +instead of the latest stable release. + +In addition, the controller has been updated to Kubernetes v1.30.0, +Helm v3.14.4, and various other dependencies to their latest version +to patch upstream CVEs.
+ +Improvements: +- Promote Helm APIs to `source.toolkit.fluxcd.io/v1` (GA) + [#1428](https://github.com/fluxcd/source-controller/pull/1428) +- Add `.spec.ignoreMissingValuesFiles` to HelmChart API + [#1447](https://github.com/fluxcd/source-controller/pull/1447) +- Implement `.spec.ref.semverFilter` in OCIRepository API + [#1407](https://github.com/fluxcd/source-controller/pull/1407) +- Helm: Allow insecure registry login + [#1412](https://github.com/fluxcd/source-controller/pull/1412) +- Add support for Notation verification to HelmChart and OCIRepository + [#1075](https://github.com/fluxcd/source-controller/pull/1075) +- Various dependency updates + [#1442](https://github.com/fluxcd/source-controller/pull/1442) + [#1450](https://github.com/fluxcd/source-controller/pull/1450) + [#1469](https://github.com/fluxcd/source-controller/pull/1469) + [#1378](https://github.com/fluxcd/source-controller/pull/1378) + +Fixes: +- Bind cached helm index to the maximum index size + [#1457](https://github.com/fluxcd/source-controller/pull/1457) +- Remove `genclient:Namespaced` tag + [#1386](https://github.com/fluxcd/source-controller/pull/1386) + +## 1.2.5 + +**Release date:** 2024-04-04 + +This patch release comes with improvements to the `HelmChart` name validation +and adds logging sanitization of connection error messages for `Bucket` sources.
+ +Fixes: +- Improve chart name validation + [#1377](https://github.com/fluxcd/source-controller/pull/1377) +- Sanitize URLs for bucket fetch error messages + [#1430](https://github.com/fluxcd/source-controller/pull/1430) + +Improvements: +- Update controller-gen to v0.14.0 + [#1399](https://github.com/fluxcd/source-controller/pull/1399) + +## 1.2.4 + +**Release date:** 2024-02-01 + +This patch release updates the Kubernetes dependencies to v1.28.6 and various +other dependencies to their latest version to patch upstream CVEs. + +Improvements: +- Various dependency updates + [#1362](https://github.com/fluxcd/source-controller/pull/1362) + [#1357](https://github.com/fluxcd/source-controller/pull/1357) + [#1353](https://github.com/fluxcd/source-controller/pull/1353) + [#1347](https://github.com/fluxcd/source-controller/pull/1347) + [#1343](https://github.com/fluxcd/source-controller/pull/1343) + [#1340](https://github.com/fluxcd/source-controller/pull/1340) + [#1338](https://github.com/fluxcd/source-controller/pull/1338) + [#1336](https://github.com/fluxcd/source-controller/pull/1336) + [#1334](https://github.com/fluxcd/source-controller/pull/1334) + +## 1.2.3 + +**Release date:** 2023-12-14 + +This patch release updates the controller's Helm dependency to v3.13.3. + +Improvements: +- Update Helm to v3.13.3 + [#1325](https://github.com/fluxcd/source-controller/pull/1325) +- helmrepo: Remove migration log/event + [#1324](https://github.com/fluxcd/source-controller/pull/1324) + +## 1.2.2 + +**Release date:** 2023-12-11 + +This patch release addresses an issue with AWS ECR authentication introduced in +v1.2.0. + +In addition, a variety of dependencies have been updated. Including an update +of the container base image to Alpine v3.19. 
+ +Fixes: +- Address issue with authenticating towards AWS ECR + [#1318](https://github.com/fluxcd/source-controller/pull/1318) + [#1321](https://github.com/fluxcd/source-controller/pull/1321) + +Improvements: + +- Update dependencies + [#1314](https://github.com/fluxcd/source-controller/pull/1314) + [#1318](https://github.com/fluxcd/source-controller/pull/1318) + [#1321](https://github.com/fluxcd/source-controller/pull/1321) +- build: update Alpine to 3.19 + [#1316](https://github.com/fluxcd/source-controller/pull/1316) + +## 1.2.1 + +**Release date:** 2023-12-08 + +This patch release ensures the controller is built with the latest Go `1.21.x` +release, to mitigate multiple security vulnerabilities which were published +shortly after the release of v1.2.0. + +In addition, a small number of dependencies have been updated to their latest +version. + +Improvements: +- Update dependencies + [#1309](https://github.com/fluxcd/source-controller/pull/1309) + +## 1.2.0 + +**Release date:** 2023-12-05 + +This minor release comes with API changes, bug fixes and several new features. + +### Bucket + +A new field, `.spec.prefix`, has been added to the Bucket API, which enables +server-side filtering of files if the object's `.spec.provider` is set to +`generic`/`aws`/`gcp`. + +### OCIRepository and HelmChart + +Two new fields, `.spec.verify.matchOIDCIdentity.issuer` and +`.spec.verify.matchOIDCIdentity.subject` have been added to the HelmChart and +OCIRepository APIs. If the image has been keylessly signed via Cosign, these +fields can be used to verify the OIDC issuer of the Fulcio certificate and the +OIDC identity's subject respectively. + +### HelmRepository + +A new boolean field, `.spec.insecure`, has been introduced to the HelmRepository +API, which allows connecting to a non-TLS HTTP container registry. It is only +considered if the object's `.spec.type` is set to `oci`.
+ +From this release onwards, HelmRepository objects of type OCI are treated as +static objects, i.e. they have an empty status. +Existing objects undergo a one-time automatic migration and new objects +will undergo a one-time reconciliation to remove any status fields. + +Additionally, the controller now performs a shallow clone if the +`.spec.ref.name` of the GitRepository object points to a branch or a tag. + +Furthermore, a bug has been fixed, where the controller would try to +authenticate against public OCI registries if the HelmRepository object has a +reference to a Secret containing a CA certificate. + +Lastly, dependencies have been updated to their latest version, including an +update of Kubernetes to v1.28.4. + +Fixes: +- Address miscellaneous issues throughout code base + [#1257](https://github.com/fluxcd/source-controller/pull/1257) +- helmrepo: only configure tls login option when required + [#1289](https://github.com/fluxcd/source-controller/pull/1289) +- oci: rename `OCIChartRepository.insecure` to `insecureHTTP` + [#1299](https://github.com/fluxcd/source-controller/pull/1299) +- Use bitnami Minio oci chart for e2e + [#1301](https://github.com/fluxcd/source-controller/pull/1301) + +Improvements: +- build(deps): bump Go dependencies + [#1260](https://github.com/fluxcd/source-controller/pull/1260) + [#1261](https://github.com/fluxcd/source-controller/pull/1261) + [#1269](https://github.com/fluxcd/source-controller/pull/1269) + [#1291](https://github.com/fluxcd/source-controller/pull/1291) +- build(deps): bump the ci group dependencies + [#1265](https://github.com/fluxcd/source-controller/pull/1265) + [#1266](https://github.com/fluxcd/source-controller/pull/1266) + [#1272](https://github.com/fluxcd/source-controller/pull/1272) + 
[#1277](https://github.com/fluxcd/source-controller/pull/1277) + [#1281](https://github.com/fluxcd/source-controller/pull/1281) + [#1285](https://github.com/fluxcd/source-controller/pull/1285) + [#1296](https://github.com/fluxcd/source-controller/pull/1296) + [#1303](https://github.com/fluxcd/source-controller/pull/1303) +- bucket: Add prefix filtering capability + [#1228](https://github.com/fluxcd/source-controller/pull/1228) +- Static HelmRepository OCI + [#1243](https://github.com/fluxcd/source-controller/pull/1243) +- cosign: allow identity matching for keyless verification + [#1250](https://github.com/fluxcd/source-controller/pull/1250) +- Upgrade `go-git` to v5.10.0 + [#1271](https://github.com/fluxcd/source-controller/pull/1271) +- storage: change default file permissions + [#1276](https://github.com/fluxcd/source-controller/pull/1276) +- Update dependencies to Kubernetes v1.28 + [#1286](https://github.com/fluxcd/source-controller/pull/1286) +- Add `.spec.insecure` to `HelmRepository` for `type: oci` + [#1288](https://github.com/fluxcd/source-controller/pull/1288) +- Update Git dependencies + [#1300](https://github.com/fluxcd/source-controller/pull/1300) +- Update Go dependencies + [#1304](https://github.com/fluxcd/source-controller/pull/1304) + +## 1.1.2 + +**Release date:** 2023-10-11 + +This patch release fixes a bug where OCIRepository objects can't be consumed +when the OCI image layer contains symlinks. 
+ +Fixes: +- oci: Skip symlinks found in upstream artifacts + [#1246](https://github.com/fluxcd/source-controller/pull/1246/) + +Improvements: +- build(deps): bump the ci group with 1 update + [#1256](https://github.com/fluxcd/source-controller/pull/1256) + +## 1.1.1 + +**Release date:** 2023-09-18 + +This is a patch release that fixes a regression introduced in v1.1.0 where +HelmRepository objects would not be reconciled if they provided a TLS Secret +using `.spec.secretRef` with a type other than `Opaque` or `kubernetes.io/tls`. + +In addition, the URL lookup strategy for Buckets has been changed from path to +auto, to widen support for S3-compatible object storage services. + +Lastly, several dependencies have been updated to their latest versions. + +Fixes: +- bucket: use auto lookup type + [#1222](https://github.com/fluxcd/source-controller/pull/1222) +- helmrepo: fix Secret type check for TLS via `.spec.secretRef` + [#1225](https://github.com/fluxcd/source-controller/pull/1225) +- Upgrade github.com/fluxcd/pkg/{git,git/gogit} + [#1236](https://github.com/fluxcd/source-controller/pull/1236) + +Improvements: +- build(deps): bump the ci group dependencies + [#1213](https://github.com/fluxcd/source-controller/pull/1213) + [#1224](https://github.com/fluxcd/source-controller/pull/1224) + [#1230](https://github.com/fluxcd/source-controller/pull/1230) + [#1235](https://github.com/fluxcd/source-controller/pull/1235) +- docs: Add missing pem-encoding reference + [#1216](https://github.com/fluxcd/source-controller/pull/1216) +- build(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 + [#1227](https://github.com/fluxcd/source-controller/pull/1227) + +## 1.1.0 + +**Release date:** 2023-08-23 + +This minor release comes with API changes, bug fixes and several new features. 
+ +All APIs that accept TLS data have been modified to adopt Secrets of type +`kubernetes.io/tls`. This includes: +* HelmRepository: The field `.spec.secretRef` has been __deprecated__ in favor +of a new field [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/helmrepositories.md#cert-secret-reference). + This field is also supported by OCI HelmRepositories. +* OCIRepository: Support for the `caFile`, `keyFile` and `certFile` keys in the + Secret specified in [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/ocirepositories.md#cert-secret-reference) + have been __deprecated__ in favor of `ca.crt`, `tls.key` and `tls.crt`. + Also, the Secret now must be of type `Opaque` or `kubernetes.io/tls`. +* GitRepository: CA certificate can now be provided in the Secret specified in + `.spec.secretRef` using the `ca.crt` key, which takes precedence over the + existing `caFile` key. + +Furthermore, GitRepository has a couple of new features: +* Proxy support: A new field [`.spec.proxySecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#proxy-secret-reference) + has been introduced which can be used to specify the proxy configuration to + use for all remote Git operations related to the particular object. +* Tag verification: The field [`.spec.verification.mode`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#verification) + now supports the following values: + * HEAD: Verify the HEAD of the Git repository. + * Tag: Verify the tag specified in `.spec.ref` + * TagAndHead: Verify the tag specified in `.spec.ref` and the commit it + points to. + +Starting with this version, the controller now stops exporting an object's +metrics as soon as the object has been deleted.
+ +In addition, the controller now consumes significantly less CPU and memory when +reconciling Helm repository indexes. + +Lastly, a new flag `--interval-jitter-percentage` has been introduced which can +be used to specify a jitter to the reconciliation interval in order to +distribute the load more evenly when multiple objects are set up with the same +interval. + +Improvements: +- gitrepo: Add support for specifying proxy per `GitRepository` + [#1109](https://github.com/fluxcd/source-controller/pull/1109) +- helmrepo: add `.spec.certSecretRef` for specifying TLS auth data + [#1160](https://github.com/fluxcd/source-controller/pull/1160) +- Update docs on Azure identity + [#1167](https://github.com/fluxcd/source-controller/pull/1167) +- gitrepo: document limitation of `spec.ref.name` with Azure Devops + [#1175](https://github.com/fluxcd/source-controller/pull/1175) +- ocirepo: add cosign support for insecure HTTP registries + [#1176](https://github.com/fluxcd/source-controller/pull/1176) +- Handle delete before adding finalizer + [#1177](https://github.com/fluxcd/source-controller/pull/1177) +- Store Helm indexes in JSON format + [#1178](https://github.com/fluxcd/source-controller/pull/1178) +- Unpin go-git and update to v5.8.1 + [#1179](https://github.com/fluxcd/source-controller/pull/1179) +- controller: jitter requeue interval + [#1184](https://github.com/fluxcd/source-controller/pull/1184) +- cache: ensure new expiration is persisted + [#1185](https://github.com/fluxcd/source-controller/pull/1185) +- gitrepo: add support for Git tag verification + [#1187](https://github.com/fluxcd/source-controller/pull/1187) +- Update dependencies + [#1191](https://github.com/fluxcd/source-controller/pull/1191) +- Adopt Kubernetes style TLS Secrets + 
[#1194](https://github.com/fluxcd/source-controller/pull/1194) +- Update dependencies + [#1196](https://github.com/fluxcd/source-controller/pull/1196) +- Helm OCI: Add support for TLS registries with self-signed certs + [#1197](https://github.com/fluxcd/source-controller/pull/1197) +- Update dependencies + [#1202](https://github.com/fluxcd/source-controller/pull/1202) +- Preserve url encoded path in normalized helm repository URL + [#1203](https://github.com/fluxcd/source-controller/pull/1203) +- Fix link ref in API docs + [#1204](https://github.com/fluxcd/source-controller/pull/1204) + +Fixes: +- Fix the helm cache arguments + [#1170](https://github.com/fluxcd/source-controller/pull/1170) +- Delete stale metrics on object delete + [#1183](https://github.com/fluxcd/source-controller/pull/1183) +- Disable system-wide git config in tests + [#1192](https://github.com/fluxcd/source-controller/pull/1192) +- Fix links in API docs + [#1200](https://github.com/fluxcd/source-controller/pull/1200) + +## 1.0.1 + +**Release date:** 2023-07-10 + +This is a patch release that fixes the AWS authentication for cross-region ECR repositories. + +Fixes: +- Update `fluxcd/pkg/oci` to fix ECR cross-region auth + [#1158](https://github.com/fluxcd/source-controller/pull/1158) + +## 1.0.0 + +**Release date:** 2023-07-03 + +This is the first stable release of the controller. From now on, this controller +follows the [Flux 2 release cadence and support pledge](https://fluxcd.io/flux/releases/). + +Starting with this version, the build, release and provenance portions of the +Flux project supply chain [provisionally meet SLSA Build Level 3](https://fluxcd.io/flux/security/slsa-assessment/). 
+ +This release includes several minor changes that primarily focus on addressing +forgotten and obsolete bits in the logic related to GitRepository objects. + +Including a removal of the `OptimizedGitClones` feature flag. If your +Deployment is configured to disable this flag, you should remove it. + +In addition, dependencies have been updated to their latest version, including +an update of Kubernetes to v1.27.3. + +For a comprehensive list of changes since `v0.36.x`, please refer to the +changelog for [v1.0.0-rc.1](#100-rc1), [v1.0.0-rc.3](#100-rc3) and +[`v1.0.0-rc.4`](#100-rc4). + +Improvements: +- gitrepo: remove `OptimizedGitClones` as a feature gate + [#1124](https://github.com/fluxcd/source-controller/pull/1124) + [#1126](https://github.com/fluxcd/source-controller/pull/1126) +- Update dependencies + [#1127](https://github.com/fluxcd/source-controller/pull/1127) + [#1147](https://github.com/fluxcd/source-controller/pull/1147) +- Update Cosign to v2.1.0 + [#1132](https://github.com/fluxcd/source-controller/pull/1132) +- Align `go.mod` version with Kubernetes (Go 1.20) + [#1134](https://github.com/fluxcd/source-controller/pull/1134) +- Add the verification key to the GitRepository verified status condition + [#1136](https://github.com/fluxcd/source-controller/pull/1136) +- gitrepo: remove obsolete proxy docs + [#1144](https://github.com/fluxcd/source-controller/pull/1144) + +## 1.0.0-rc.5 + +**Release date:** 2023-06-01 + +This release candidate fixes a regression introduced in `1.0.0-rc.4` where +support for Git servers that exclusively use v2 of the wire protocol like Azure +DevOps and AWS CodeCommit was broken. + +Lastly, the controller's dependencies were updated to mitigate CVE-2023-33199.
+ +Improvements: +- build(deps): bump github.com/sigstore/rekor from 1.1.1 to 1.2.0 + [#1107](https://github.com/fluxcd/source-controller/pull/1107) + +Fixes: +- Bump `fluxcd/pkg/git/gogit` to v0.12.0 + [#1111](https://github.com/fluxcd/source-controller/pull/1111) + +## 1.0.0-rc.4 + +**Release date:** 2023-05-26 + +This release candidate comes with support for Kubernetes v1.27 and Cosign v2. +It also enables the use of annotated Git tags with `.spec.ref.name` in +`GitRepository`. Furthermore, it fixes a bug related to accessing Helm OCI +charts on ACR using OIDC auth. + +Improvements: +- build(deps): bump helm/kind-action from 1.5.0 to 1.7.0 + [#1100](https://github.com/fluxcd/source-controller/pull/1100) +- build(deps): bump sigstore/cosign-installer from 3.0.3 to 3.0.5 + [#1101](https://github.com/fluxcd/source-controller/pull/1101) +- build(deps): bump actions/setup-go from 4.0.0 to 4.0.1 + [#1102](https://github.com/fluxcd/source-controller/pull/1102) +- Update cosign to v2 + [#1096](https://github.com/fluxcd/source-controller/pull/1096) +- build(deps): bump github.com/sigstore/rekor from 0.12.1-0.20220915152154-4bb6f441c1b2 to 1.1.1 + [#1083](https://github.com/fluxcd/source-controller/pull/1083) +- Update controller-runtime and Kubernetes dependencies + [#1104](https://github.com/fluxcd/source-controller/pull/1104) +- Update dependencies; switch to `go-git/go-git` and `pkg/tar` + [#1105](https://github.com/fluxcd/source-controller/pull/1105) + +## 1.0.0-rc.3 + +**Release date:** 2023-05-12 + +This release candidate introduces the verification of the Artifact digest in +storage during reconciliation. This ensures that the Artifact is not tampered +with after it was written to storage. 
When the digest does not match, the +controller will emit a warning event and remove the file from storage, forcing +the Artifact to be re-downloaded. + +In addition, files with executable permissions are now archived with their mode +set to `0o744` instead of `0o644`. Allowing the extracted file to be executable +by the user. + +Lastly, the controller's dependencies were updated to mitigate CVE-2023-1732 +and CVE-2023-2253, and the controller base image was updated to Alpine 3.18. + +Improvements: +- Verify digest of Artifact in Storage + [#1088](https://github.com/fluxcd/source-controller/pull/1088) +- build(deps): bump github.com/cloudflare/circl from 1.3.2 to 1.3.3 + [#1092](https://github.com/fluxcd/source-controller/pull/1092) +- build(deps): bump github.com/docker/distribution from 2.8.1+incompatible to 2.8.2+incompatible + [#1093](https://github.com/fluxcd/source-controller/pull/1093) +- storage: set `0o744` for files with exec mode set + [#1094](https://github.com/fluxcd/source-controller/pull/1094) + +## 1.0.0-rc.2 + +**Release date:** 2023-05-09 + +This release candidate comes with various updates to the controller's dependencies, +most notable, Helm was updated to v3.11.3. + +Improvements: +- Update dependencies + [#1086](https://github.com/fluxcd/source-controller/pull/1086) +- Set RecoverPanic globally across controllers + [#1077](https://github.com/fluxcd/source-controller/pull/1077) +- Move controllers to internal/controller + [#1076](https://github.com/fluxcd/source-controller/pull/1076) + +## 1.0.0-rc.1 + +**Release date:** 2023-03-30 + +This release candidate promotes the `GitRepository` API from `v1beta2` to `v1`. +The controller now supports horizontal scaling using +sharding based on a label selector. 
+ +In addition, support for Azure Workload Identity was added to +`OCIRepositories`, `Buckets` and `HelmRepositories` when using `provider: azure`. + +### Highlights + +#### API changes + +The `GitRepository` kind was promoted from v1beta2 to v1 (GA) and deprecated fields were removed. + +The common types `Artifact`, `Conditions` and the `Source` interface were promoted to v1. + +The `gitrepositories.source.toolkit.fluxcd.io` CRD contains the following versions: +- v1 (storage version) +- v1beta2 (deprecated) +- v1beta1 (deprecated) + +#### Upgrade procedure + +The `GitRepository` v1 API is backwards compatible with v1beta2, except for the following: +- the deprecated field `.spec.gitImplementation` was removed +- the unused field `.spec.accessFrom` was removed +- the deprecated field `.status.contentConfigChecksum` was removed +- the deprecated field `.status.artifact.checksum` was removed +- the `.status.url` was removed in favor of the absolute `.status.artifact.url` + +To upgrade from v1beta2, after deploying the new CRD and controller, +set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that +contain `GitRepository` definitions and remove the deprecated fields if any. +Bumping the API version in manifests can be done gradually. +It is advised to not delay this procedure as the beta versions will be removed after 6 months. + +#### Sharding + +Starting with this release, the controller can be configured with +`--watch-label-selector`, after which only objects with this label will +be reconciled by the controller. + +This allows for horizontal scaling, where source-controller +can be deployed multiple times with a unique label selector +which is used as the sharding key. + +Note that this also requires configuration of the `--storage-adv-addr` +to a unique address (in combination with a proper Service definition). +This to ensure the Artifacts handled by the sharding controller point +to a unique endpoint. 
+ +In addition, Source object kinds which have a dependency on another +kind (i.e. a HelmChart on a HelmRepository) need to have the same +labels applied to work as expected. + +### Full changelog + +Improvements: +- GA: Promote `GitRepository` API to `source.toolkit.fluxcd.io/v1` + [#1056](https://github.com/fluxcd/source-controller/pull/1056) +- Add reconciler sharding capability based on label selector + [#1059](https://github.com/fluxcd/source-controller/pull/1059) +- Support Azure Workload Identity + [#1048](https://github.com/fluxcd/source-controller/pull/1048) +- Update dependencies + [#1062](https://github.com/fluxcd/source-controller/pull/1062) +- Update workflows + [#1054](https://github.com/fluxcd/source-controller/pull/1054) + +## 0.36.1 + +**Release date:** 2023-03-20 + +This release fixes a bug where after reading a `.sourceignore` file in a +subdirectory, the controller could start to ignore files from directories next +to the directory the `.sourceignore` file was placed in. + +Fixes: +- Update sourceignore to fix pattern domain bug + [#1050](https://github.com/fluxcd/source-controller/pull/1050) + +## 0.36.0 + +**Release date:** 2023-03-08 + +This release changes the format of the Artifact `Revision` field when using a +GitRepository with a `.spec.ref.name` set (introduced in [`v0.35.0`](#0350)), +changing it from `sha1:` to `@sha1:`. Offering a more +precise reflection of the revision the Artifact was created from. + +In addition, `klog` is now configured to log using the same logger as the rest +of the controller (providing a consistent log format). + +Lastly, the controller is now built using Go `1.20`, and the dependencies have +been updated to their latest versions. 
+ +Improvements: +- Advertise absolute reference in Artifact for GitRepository name ref + [#1036](https://github.com/fluxcd/source-controller/pull/1036) +- Update Go to 1.20 + [#1040](https://github.com/fluxcd/source-controller/pull/1040) +- Update dependencies + [#1040](https://github.com/fluxcd/source-controller/pull/1040) + [#1041](https://github.com/fluxcd/source-controller/pull/1041) + [#1043](https://github.com/fluxcd/source-controller/pull/1043) +- Use `logger.SetLogger` to also configure `klog` + [#1044](https://github.com/fluxcd/source-controller/pull/1044) + +## 0.35.2 + +**Release date:** 2023-02-23 + +This release reduces the amount of memory consumed by the controller when +reconciling HelmRepositories, by using only the digest of the YAML file as the +Revision of the Artifact instead of the stable sorted version of the entire +index. This aligns with the behavior before `v0.35.0`, and is therefore +considered a bug fix. + +In addition, the dependencies have been updated to include some minor security +patches. + +Note that `v0.35.0` contains breaking changes. Please refer to the [changelog +entry](#0350) for more information. + +Fixes: +- helm: only use Digest to calculcate index revision + [#1035](https://github.com/fluxcd/source-controller/pull/1035) + +Improvements: +- Update dependencies + [#1036](https://github.com/fluxcd/source-controller/pull/1036) + +## 0.35.1 + +**Release date:** 2023-02-17 + +This release addresses a hypothetical issue with the Artifact `Digest` field +validation, where a patch of the Artifact could fail to be applied to an object +due to the lack of an `omitempty` tag on the optional field. In reality, this +issue is not possible to encounter, as the `Digest` field is always set when +the Artifact is created. + +Note that `v0.35.0` contains breaking changes. 
Please refer to the [changelog
+entry](#0350) for more information.
+
+Fixes:
+- api: omit empty Digest in Artifact
+  [#1031](https://github.com/fluxcd/source-controller/pull/1031)
+
+## 0.35.0
+
+**Release date:** 2023-02-16
+
+This release introduces a new format for the Artifact `Revision`, and deprecates
+the `Checksum` field in favor of a new `Digest` field. In addition, it adds
+support for Git reference names in a GitRepository, and comes with the usual
+collection of dependency updates.
+
+### Highlights
+
+#### Support for Git reference names
+
+Starting with this version, it is possible to define a [Git Reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References)
+in a GitRepository using `.spec.ref.name`.
+
+This opens the door to a range of functionalities not available before, as it
+for example allows the controller to follow pull (`refs/pull/<id>/head`) or
+merge (`refs/merge-requests/<id>/head`) requests, and allows a transition from
+the HEAD of a branch (`refs/heads/main`) to a tag (`refs/tags/v0.1.0`) by
+changing a single field value.
+
+Refer to the [GitRepository specification](https://github.com/fluxcd/source-controller/blob/v0.35.0/docs/spec/v1beta2/gitrepositories.md#name-example)
+for more details.
+
+#### Introduction of Artifact Digest
+
+The Artifact of a Source will now advertise a `Digest` field containing the
+checksum of the file advertised in the `Path`, and the alias of the algorithm
+used to calculate it. Creating a "digest" in the format of `<algorithm>:<checksum>`.
+
+The algorithm is configurable using the newly introduced `--artifact-digest-algo`
+flag, which allows configuration of other algorithms (`sha384`, `sha512`, and
+`blake3`) than the hardcoded `sha256` default of the [now deprecated `Checksum`
+field](#deprecation-of-artifact-checksum).
+
+Please note that until the `Checksum` is fully deprecated, changing the
+algorithm is not yet advised (albeit supported), as this will result in a
+double computation. 
+ +### :warning: Breaking changes + +#### Artifact Revision format + +The `Revision` format for an Artifact consisting of a named pointer (a Git +branch or tag) and/or a specific revision (a Git commit SHA or other calculated +checksum) has changed to contain an `@` separator opposed to `/`, and includes +the algorithm alias as a prefix to a checksum (creating a "digest"). +In addition, `HEAD` is no longer used as a named pointer for exact commit +references, but will now only advertise the commit itself. + +For example: + +- `main/1eabc9a41ca088515cab83f1cce49eb43e84b67f` => `main@sha1:1eabc9a41ca088515cab83f1cce49eb43e84b67f` +- `HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738` => `sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738` +- `tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc` => `tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc` +- `8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c` => `sha256:8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c` + +When the storage of the controller is backed by a Persistent Volume, the +rollout of this new format happens for the next new revision the controller +encounters. Otherwise, the new revision will be advertised as soon as the +Artifact has been reproduced after the controller is deployed. + +Other Flux controllers making use of an Artifact are aware of the change in +format, and work with it in a backwards compatible manner. Avoiding observing +a change of revision when this is actually just a change of format. If you +programmatically make use of the Revision, please refer to [the +`TransformLegacyRevision` helper](https://github.com/fluxcd/source-controller/blob/api/v0.35.0/api/v1beta2/artifact_types.go#L121) +to allow a transition period in your application. 
+ +For more information around this change, refer to +[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#establish-an-artifact-revision-format). + +#### Deprecation of Artifact Checksum + +The `Checksum` field of an Artifact has been deprecated in favor of the newly +introduced `Digest`. Until the deprecated field is removed in the next version +of the API, the controller will continue to produce the SHA-256 checksum in +addition to the digest. Changing the algorithm used to produce the digest using +`--artifact-digest-algo` is therefore not yet advised (albeit supported), as +this will result in a double computation. + +For more information around this change, refer to +[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#introduce-a-digest-field). + +### Full changelog + +Improvements: +- Introduction of Digest and change of Revision format + [#1001](https://github.com/fluxcd/source-controller/pull/1001) +- Improve HelmRepository type switching from default to oci + [#1016](https://github.com/fluxcd/source-controller/pull/1016) +- Apply default permission mode to all files/dirs in an artifact archive + [#1020](https://github.com/fluxcd/source-controller/pull/1020) +- Add support for checking out Git references + [#1026](https://github.com/fluxcd/source-controller/pull/1026) +- Update dependencies + [#1025](https://github.com/fluxcd/source-controller/pull/1025) + [#1028](https://github.com/fluxcd/source-controller/pull/1028) + [#1030](https://github.com/fluxcd/source-controller/pull/1030) + +Fixes: +- Normalize Helm repository URL with query params properly + [#1015](https://github.com/fluxcd/source-controller/pull/1015) +- Prevent panic when cloning empty Git repository + [#1021](https://github.com/fluxcd/source-controller/pull/1021) 
+ +## 0.34.0 + +**Release date:** 2023-01-31 + +This prerelease comes with support for HTTPS bearer token authentication for Git +repository. The GitRepository authentication Secret is expected to contain the +bearer token in `.data.bearerToken`. + +The caching of Secret and ConfigMap resources is disabled by +default to improve memory usage. To opt-out from this behavior, start the +controller with: `--feature-gates=CacheSecretsAndConfigMaps=true`. + +All the Source kinds now support progressive status updates. The progress made +by the controller during reconciliation of a Source is reported immediately in +the status of the Source object. + +In addition, the controller dependencies have been updated to Kubernetes v1.26. + +:warning: **Breaking change:** When using SSH authentication in GitRepository, +if the referenced Secret contained `.data.username`, it was used as the SSH +user. With this version, SSH user will be the username in the SSH address. For +example, if the Git repository address is `ssh://flux@example.com`, `flux` will +be used as the SSH user during SSH authentication. When no username is +specified, `git` remains the default SSH user. 
+ +Improvements: +- Garbage collection lock file ignore tests + [#992](https://github.com/fluxcd/source-controller/pull/992) +- purge minio test container at the end of tests + [#993](https://github.com/fluxcd/source-controller/pull/993) +- Introduce Progressive status + [#974](https://github.com/fluxcd/source-controller/pull/974) +- build(deps): bump github.com/containerd/containerd from 1.6.10 to 1.6.12 + [#997](https://github.com/fluxcd/source-controller/pull/997) +- fix typo in helmRepo secretRef spec CRD + [#996](https://github.com/fluxcd/source-controller/pull/996) +- Fix OCIRepository testdata permissions + [#998](https://github.com/fluxcd/source-controller/pull/998) +- Set rate limiter option in test reconcilers + [#999](https://github.com/fluxcd/source-controller/pull/999) +- Update git dependencies for bearer token support + [#1003](https://github.com/fluxcd/source-controller/pull/1003) +- Document support for bearer token authentication over https in gitrepositories + [#1000](https://github.com/fluxcd/source-controller/pull/1000) +- Disable caching of secrets and configmaps + [#989](https://github.com/fluxcd/source-controller/pull/989) +- Update dependencies + [#1008](https://github.com/fluxcd/source-controller/pull/1008) +- build: Enable SBOM and SLSA Provenance + [#1009](https://github.com/fluxcd/source-controller/pull/1009) +- Add note about sourceignore recursion + [#1007](https://github.com/fluxcd/source-controller/pull/1007) +- CI: Replace engineerd/setup-kind with helm/kind-action + [#1010](https://github.com/fluxcd/source-controller/pull/1010) +- helm/oci: Add context to chart download failure + [#1013](https://github.com/fluxcd/source-controller/pull/1013) + +## 0.33.0 + +**Release date:** 2022-12-20 + +This prerelease comes 
with dedicated mux for the controller's fileserver. All code references to `libgit2` were removed, and the `spec.gitImplementation` +field is no longer being honored, but rather `go-git` is used. +For more information, refer to version 0.32.0's changelog, which started `libgit2`'s +deprecation process. + +The controller's garbage collection now takes into consideration +lock files. + +The feature gate `ForceGoGitImplementation` was removed, users passing it as their +controller's startup args will need to remove it before upgrading. + +Fixes: +- git: Fix issue with recurseSubmodules + [#975](https://github.com/fluxcd/source-controller/pull/975) +- Fix aliased chart dependencies resolution + [#988](https://github.com/fluxcd/source-controller/pull/988) + +Improvements: +- fileserver: Use new ServeMux + [#972](https://github.com/fluxcd/source-controller/pull/972) +- Remove libgit2 and git2go from codebase + [#977](https://github.com/fluxcd/source-controller/pull/977) +- Use Event v1 API metadata keys in notifications + [#990](https://github.com/fluxcd/source-controller/pull/990) +- storage: take lock files into consideration while garbage collecting + [#991](https://github.com/fluxcd/source-controller/pull/991) +- Migrate to Go Native fuzz and improve reliability + [#965](https://github.com/fluxcd/source-controller/pull/965) +- build: Add tidy to make verify + [#966](https://github.com/fluxcd/source-controller/pull/966) +- build: Add postbuild script for fuzzing + [#968](https://github.com/fluxcd/source-controller/pull/968) +- build: Link libgit2 via LIB_FUZZING_ENGINE + [#969](https://github.com/fluxcd/source-controller/pull/969) +- GitRepo: git impl. 
deprecation test cleanup
+  [#980](https://github.com/fluxcd/source-controller/pull/980)
+- minio: use container image for tests
+  [#981](https://github.com/fluxcd/source-controller/pull/981)
+- helm: Update SDK to v3.10.3
+  [#982](https://github.com/fluxcd/source-controller/pull/982)
+- Update fluxcd/pkg/oci dependency
+  [#983](https://github.com/fluxcd/source-controller/pull/983)
+- Update dependencies
+  [#985](https://github.com/fluxcd/source-controller/pull/985)
+
+## 0.32.1
+
+**Release date:** 2022-11-18
+
+This prerelease rectifies the `v0.32.0` release by retracting the previous Go
+version, bumping the controller api version and the controller deployment.
+
+## 0.32.0
+
+**Release date:** 2022-11-17
+
+This prerelease comes with a major refactoring of the controller's Git operations.
+The `go-git` implementation now supports all Git servers, including
+Azure DevOps, which previously was only supported by `libgit2`.
+
+This version initiates the soft deprecation of the `libgit2` implementation.
+The motivation for removing support for `libgit2` being:
+- Reliability: over the past months we managed to substantially reduce the
+issues users experienced, but there are still crashes happening when the controller
+runs over longer periods of time, or when under intense GC pressure.
+- Performance: due to the inherent nature of `libgit2` implementation, which
+is a C library called via CGO through `git2go`, it will never perform as well as
+a pure Go implementation. At scale, memory pressure ensues which then triggers
+the reliability issues above.
+- Lack of Shallow Clone Support.
+- Maintainability: supporting two Git implementations is a big task, even more
+so when one of them is in a completely different tech stack. Given its nature, to
+support `libgit2`, we have to maintain an additional repository. 
Statically built +`libgit2` libraries need to be cross-compiled for all our supported platforms. +And a lot of "unnecessary" code has to be in place to make building, testing and +fuzzing work seamlessly. + +As a result the field `spec.gitImplementation` is ignored and the +reconciliations will use `go-git`. To opt-out from this behaviour, start +the controller with: `--feature-gates=ForceGoGitImplementation=false`. + +Users having any issues with `go-git` should report it to the Flux team, +so any issues can be resolved before support for `libgit2` is completely +removed from the codebase. + +Improvements: +- Refactor Git operations and introduce go-git support for Azure DevOps and AWS CodeCommit + [#944](https://github.com/fluxcd/source-controller/pull/944) +- Use Flux Event API v1beta1 + [#952](https://github.com/fluxcd/source-controller/pull/952) +- gogit: Add new ForceGoGitImplementation FeatureGate + [#945](https://github.com/fluxcd/source-controller/pull/945) +- Remove nsswitch.conf creation from Dockerfile + [#958](https://github.com/fluxcd/source-controller/pull/958) +- Update dependencies + [#960](https://github.com/fluxcd/source-controller/pull/960) + [#950](https://github.com/fluxcd/source-controller/pull/950) + [#959](https://github.com/fluxcd/source-controller/pull/959) +- Upgrade to azure-sdk-for-go/storage/azblob v0.5.1 + [#931](https://github.com/fluxcd/source-controller/pull/931) + +## 0.31.0 + +**Release date:** 2022-10-21 + +This prerelease comes with support for Cosign verification of Helm charts. +The signatures verification can be configured by setting `HelmChart.spec.verify` with +`provider` as `cosign` and a `secretRef` to a secret containing the public key. 
+Cosign keyless verification is also supported, please see the +[HelmChart API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.31.0/docs/spec/v1beta2/helmcharts.md#verification) +for more details. + +In addition, the controller dependencies have been updated +to Kubernetes v1.25.3 and Helm v3.10.1. + +Improvements: +- Implement Cosign verification for HelmCharts + [#925](https://github.com/fluxcd/source-controller/pull/925) +- Update dependencies + [#942](https://github.com/fluxcd/source-controller/pull/942) + +Fixes: +- Allow deleting suspended objects + [#937](https://github.com/fluxcd/source-controller/pull/937) + +## 0.30.1 + +**Release date:** 2022-10-10 + +This prerelease enables the use of container-level SAS tokens when using `Bucket` objects +to access Azure Storage. The Azure SDK error message has also been enriched to hint Flux +users the potential reasons in case of failure. + +Improvements: +- List objects when checking if bucket exists to allow use of container-level SAS token + [#906](https://github.com/fluxcd/source-controller/pull/906) + +## 0.30.0 + +**Release date:** 2022-09-29 + +This prerelease adds support for Cosign verification in `OCIRepository` source. +The signatures verification can be configured by setting `OCIRepository.spec.verify` with +`provider` as `cosign` and a `secretRef` to a secret containing the public key. +Cosign keyless verification is also supported, please see the +[OCIRepository API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.30.0/docs/spec/v1beta2/ocirepositories.md#verification) +for more details. + +It also comes with strict validation rules for API fields which define a +(time) duration. Effectively, this means values without a time unit (e.g. `ms`, +`s`, `m`, `h`) will now be rejected by the API server. 
To stimulate sane +configurations, the units `ns`, `us` and `µs` can no longer be configured, nor +can `h` be set for fields defining a timeout value. + +In addition, the controller dependencies have been updated +to Kubernetes controller-runtime v0.13. + +:warning: **Breaking changes:** +- `.spec.interval` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"` +- `.spec.timeout` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"` + +Improvements: +- api: add custom validation for v1.Duration types + [#903](https://github.com/fluxcd/source-controller/pull/903) +- [RFC-0003] Implement OCIRepository verification using Cosign + [#876](https://github.com/fluxcd/source-controller/pull/876) +- Consider bipolarity conditions in Ready condition summarization + [#907](https://github.com/fluxcd/source-controller/pull/907) +- Update Bucket related SDK dependencies + [#911](https://github.com/fluxcd/source-controller/pull/911) +- Add custom CA certificates to system certificates + [#904](https://github.com/fluxcd/source-controller/pull/904) +- [OCIRepository] Optimise OCI artifacts reconciliation + [#913](https://github.com/fluxcd/source-controller/pull/913) +- Update dependencies + [#919](https://github.com/fluxcd/source-controller/pull/919) +- Build with Go 1.19 + [#920](https://github.com/fluxcd/source-controller/pull/920) +- Bump libgit2 image and disable cosign verification for CI + [#921](https://github.com/fluxcd/source-controller/pull/921) +- OCIRepositoryReconciler no-op improvements + [#917](https://github.com/fluxcd/source-controller/pull/917) +- Accept a slice of remote.Option for cosign verification + [#916](https://github.com/fluxcd/source-controller/pull/916) +- Update pkg/oci to v0.11.0 + [#922](https://github.com/fluxcd/source-controller/pull/922) + +Fixes: +- Handle nil OCI 
authenticator with malformed registry + [#897](https://github.com/fluxcd/source-controller/pull/897) + +## 0.29.0 + +**Release date:** 2022-09-09 + +This prerelease adds support for non-TLS container registries such +as [Kubernetes Kind Docker Registry](https://kind.sigs.k8s.io/docs/user/local-registry/). +Connecting to an in-cluster registry over plain HTTP, +requires setting the `OCIRepository.spec.insecure` field to `true`. + +:warning: **Breaking change:** The controller logs have been aligned +with the Kubernetes structured logging. For more details on the new logging +structure please see: [fluxcd/flux2#3051](https://github.com/fluxcd/flux2/issues/3051). + +Improvements: +- Align controller logs to Kubernetes structured logging + [#882](https://github.com/fluxcd/source-controller/pull/882) +- [OCIRepository] Add support for non-TLS insecure container registries + [#881](https://github.com/fluxcd/source-controller/pull/881) +- Fuzz optimisations + [#886](https://github.com/fluxcd/source-controller/pull/886) + +Fixes: +- [OCI] Static credentials should take precedence over the OIDC provider + [#884](https://github.com/fluxcd/source-controller/pull/884) + +## 0.28.0 + +**Release date:** 2022-08-29 + +This prerelease adds support for contextual login to container registries when pulling +Helm charts from Azure Container Registry, Amazon Elastic Container Registry +and Google Artifact Registry. Contextual login for `HelmRepository` +objects can be enabled by setting the `spec.provider` field to `azure`, `aws` or `gcp`. + +Selecting the OCI layer containing Kubernetes manifests is now possible +when defining `OCIRepository` objects by setting the `spec.layerSelector.mediaType` field. + +In addition, the controller dependencies have been updated to Kubernetes v1.25.0 and Helm v3.9.4. 
+ +Improvements: +- [HelmRepository] Enable contextual login for OCI + [#873](https://github.com/fluxcd/source-controller/pull/873) +- [OCIRepository] Select layer by media type + [#871](https://github.com/fluxcd/source-controller/pull/871) +- Update Kubernetes packages to v1.25.0 + [#875](https://github.com/fluxcd/source-controller/pull/875) +- Update dependencies + [#869](https://github.com/fluxcd/source-controller/pull/869) +- Ensure Go 1.18 for fuzz image + [#872](https://github.com/fluxcd/source-controller/pull/872) + +## 0.27.0 + +**Release date:** 2022-08-17 + +This prerelease adds support for SAS Keys when authenticating against Azure Blob Storage +and improves the documentation for `OCIRepository`. + +The package `sourceignore`, which is used for excluding files from Flux internal artifacts, +has been moved to `fluxcd/pkg/sourceignore`. + +Improvements: +- OCIRepo docs: auto-login setup details + [#862](https://github.com/fluxcd/source-controller/pull/862) +- Add Support for SAS keys in Azure Blob + [#738](https://github.com/fluxcd/source-controller/pull/738) +- Use sourceignore from fluxcd/pkg/sourceignore + [#864](https://github.com/fluxcd/source-controller/pull/864) +- Update dependencies + [#869](https://github.com/fluxcd/source-controller/pull/869) + +## 0.26.1 + +**Release date:** 2022-08-11 + +This prerelease comes with panic recovery, to protect the controller from crashing +when reconciliations lead to a crash. It also adds OCI documentation and improvements +to the controllers CI pipeline. 
+ +Improvements: +- Enable panic recovery + [#859](https://github.com/fluxcd/source-controller/pull/859) +- build: Ignore CI workflows for markdown files + [#858](https://github.com/fluxcd/source-controller/pull/858) +- oci: Document Auto-Login usage in SC + [#860](https://github.com/fluxcd/source-controller/pull/860) + +## 0.26.0 + +**Release date:** 2022-08-08 + +This prerelease comes with a new API kind named `OCIRepository`, +for fetching OCI artifacts from container registries as defined in +[RFC-0003 Flux OCI support for Kubernetes manifests](https://github.com/fluxcd/flux2/tree/main/rfcs/0003-kubernetes-oci). +Please see the +[OCIRepository API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.26.0/docs/spec/v1beta2/ocirepositories.md) +for more details. + +In addition, Helm charts stored in Git can now have dependencies to +other charts stored as OCI artifacts in container registries. + +Features: +- Implement OCIRepository reconciliation + [#788](https://github.com/fluxcd/source-controller/pull/788) + +Improvements: +- Enable Umbrella Chart with dependencies from OCI repositories + [#770](https://github.com/fluxcd/source-controller/pull/770) +- Allow for charts from OCI registries to specify a chart path + [#856](https://github.com/fluxcd/source-controller/pull/856) +- Remove MUSL and enable threadless libgit2 support + [#853](https://github.com/fluxcd/source-controller/pull/853) +- Upgrade to Go 1.18 + [#816](https://github.com/fluxcd/source-controller/pull/816) +- Update Azure Go SDK to v1.1.0 + [#786](https://github.com/fluxcd/source-controller/pull/786) + +Fixes: +- fix(openapi): full regex for url to prevent error + [#838](https://github.com/fluxcd/source-controller/pull/838) + +## 0.25.11 + +**Release date:** 2022-07-27 + +This prerelease comes with an improvement 
in the Helm OCI Chart to use an exact +version when provided. This makes it possible to work with registries that don't +support listing tags. + +Improvements: +- Don't fetch tags when exact version is specified in HelmChart + [#846](https://github.com/fluxcd/source-controller/pull/846) + +## 0.25.10 + +**Release date:** 2022-07-13 + +This prerelease fixes SIGSEGV when resolving charts dependencies. +It also brings CI improvements and update dependencies to patch upstream CVEs. + +Fixes: +- Fix SIGSEGV when resolving charts dependencies + [#827](https://github.com/fluxcd/source-controller/pull/827) +- Fix Panic when no artifact in source + [#832](https://github.com/fluxcd/source-controller/pull/832) + +Improvements: +- Update go-yaml to v3.0.1 + [#804](https://github.com/fluxcd/source-controller/pull/804) +- build: provenance and tampering checks for libgit2 + [#823](https://github.com/fluxcd/source-controller/pull/823) +- Decrease fs perms to 0o700 + [#818](https://github.com/fluxcd/source-controller/pull/818) +- build: run darwin tests on macos 10.15, 11 and 12 + [#817](https://github.com/fluxcd/source-controller/pull/817) +- Minor comment updates + [#812](https://github.com/fluxcd/source-controller/pull/812) +- Split GitHub workflows + [#811](https://github.com/fluxcd/source-controller/pull/811) +- docs: Add password-protected SSH keys information + [#801](https://github.com/fluxcd/source-controller/pull/801) +- Bump Helm to v3.9.1 + [#833](https://github.com/fluxcd/source-controller/pull/833) +- Update libgit2 to v1.3.2 + [#834](https://github.com/fluxcd/source-controller/pull/834) + +## 0.25.9 + +**Release date:** 2022-06-29 + +This prerelease fixes an authentication issue for Helm OCI where the credentials +were cached instead of being discarded after each reconciliation. 
+ +Fixes: +- helm-oci: disable cache in oci registry client + [#799](https://github.com/fluxcd/source-controller/pull/799) +- helm-oci: remove the trailing slash in `spec.url` + [#799](https://github.com/fluxcd/source-controller/pull/799) + +## 0.25.8 + +**Release date:** 2022-06-24 + +This prerelease fixes an authentication issue when using libgit2 managed +transport to checkout repos on BitBucket server. + +Fixes: +- set request auth if both username and password are non empty + [#794](https://github.com/fluxcd/source-controller/pull/794) + +Improvements: +- libgit2/managed/http: test for incomplete creds + [#796](https://github.com/fluxcd/source-controller/pull/796) + +## 0.25.7 + +**Release date:** 2022-06-22 + +This prerelease comes with an improvement in the SSH managed transport error +messages related to known hosts check and removes a deadlock in the SSH smart +subtransport. + +Fixes: +- libgit2: remove deadlock + [#785](https://github.com/fluxcd/source-controller/pull/785) + +Improvements: +- libgit2: improve known_hosts error messages + [#783](https://github.com/fluxcd/source-controller/pull/783) + +## 0.25.6 + +**Release date:** 2022-06-14 + +This prerelease fixes an issue with leaked SSH connections on +managed transport and adds some general build and libgit2 +improvements. 
+ +Fixes: +- libgit2: dispose connections in SubTransport.Close + [#775](https://github.com/fluxcd/source-controller/pull/775) + +Improvements: +- build: enable -race for go test + [#615](https://github.com/fluxcd/source-controller/pull/615) +- libgit2: refactor tests to use managed and unmanaged transport cleanly + [#777](https://github.com/fluxcd/source-controller/pull/777) +- libgit2: add contextual logging to subtransports + [#778](https://github.com/fluxcd/source-controller/pull/778) +- libgit2: fix managed transport enabled flag update + [#781](https://github.com/fluxcd/source-controller/pull/781) + +## 0.25.5 + +**Release date:** 2022-06-08 + +This prerelease fixes a regression for SSH host key verification +and fixes semver sorting for Helm OCI charts. + +In addition, the controller dependencies have been updated to Kubernetes v1.24.1. + +Fixes: +- helm: Fix sorting semver from OCI repository tags + [#769](https://github.com/fluxcd/source-controller/pull/769) +- libgit2: Fix SSH host key verification regression + [#771](https://github.com/fluxcd/source-controller/pull/771) + +Improvements: +- libgit2: Improve HTTP redirection observability + [#772](https://github.com/fluxcd/source-controller/pull/772) +- Update dependencies + [#773](https://github.com/fluxcd/source-controller/pull/773) + +## 0.25.4 + +**Release date:** 2022-06-07 + +This prerelease fixes a regression when accessing Gitlab via HTTPS +when the URL does not have the '.git' suffix. Plus some small +documentation fixes and dependency updates. 
+ +Fixes: +- Update link to v1beta2 in the API spec + [#764](https://github.com/fluxcd/source-controller/pull/764) +- libgit2: fix gitlab redirection for HTTP + [#765](https://github.com/fluxcd/source-controller/pull/765) + +Improvements: +- Update dependencies + [#766](https://github.com/fluxcd/source-controller/pull/766) + +## 0.25.3 + +**Release date:** 2022-06-06 + +This prerelease fixes a regression in HelmRepository index caching. + +Fixes: +- Fix repository cache regression + [#761](https://github.com/fluxcd/source-controller/pull/761) + +## 0.25.2 + +**Release date:** 2022-06-03 + +This prerelease fixes a bug which prevented the use of the `OptimizedGitClones` +feature when using tags to checkout a Git repository, and adds docs on how to +access Azure Blob using managed identities and aad-pod-identity. + +Improvements: +- Add docs on managed identity for Azure Blob + [#752](https://github.com/fluxcd/source-controller/pull/752) + +Fixes: +- libgit2: return CheckoutTag with LastRevision + [#755](https://github.com/fluxcd/source-controller/pull/755) +- Log on new artifact and failure recovery + [#759](https://github.com/fluxcd/source-controller/pull/759) + +## 0.25.1 + +**Release date:** 2022-06-02 + +This prerelease fixes some race conditions in the libgit2 managed ssh smart +subtransport. + +Fixes: +- libgit2/managed: fix race issues in ssh transport + [#753](https://github.com/fluxcd/source-controller/pull/753) + +## 0.25.0 + +**Release date:** 2022-06-01 + +This prerelease adds support for Helm OCI. Users can specify `.spec.type` of +a `HelmRepository` to use an OCI repository instead of an HTTP/S Helm repository. + +Please note that this currently has a couple of limitations (which will be addressed in a future release): +* Chart dependencies from OCI repositories are not supported. 
[#722](https://github.com/fluxcd/source-controller/issues/722)
+* Custom CA certificates are not supported. [#723](https://github.com/fluxcd/source-controller/issues/723)
+
+An example of OCI `HelmRepository` can be found [here](https://github.com/fluxcd/source-controller/blob/api/v0.25.0/docs/spec/v1beta2/helmrepositories.md#helm-oci-repository).
+
+A new flag `--feature-gate` has been added to disable/enable new experimental
+features. It works in a similar manner to [Kubernetes feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/).
+
+The libgit2 managed transport feature has been enabled by default. Furthermore,
+a few changes have been made to make the feature more stable and enable quicker
+clones. Users that want to opt out and use the unmanaged transports may do so
+by passing the flag `--feature-gate=GitManagedTransport=false`, but please note
+that we encourage users not to do so.
+
+GitRepository reconciliation has been made more efficient by adding support for
+no-op clones, when checking out repositories using branches or tags.
+This feature is also enabled by default, and users can opt out
+by passing the flag `--feature-gate=OptimizedGitClones=false`.
+Please note that this feature is only active when the managed transport feature
+is enabled. Disabling managed transports quietly disables optimized Git clones. 
+ +Improvements: +- Optimise clone operations + [#665](https://github.com/fluxcd/source-controller/pull/665) +- [RFC 0002] Flux OCI support for Helm + [#690](https://github.com/fluxcd/source-controller/pull/690) +- Add Git test coverage for supported algorithms + [#708](https://github.com/fluxcd/source-controller/pull/708) +- Add new flag --ssh-hostkey-algos + [#711](https://github.com/fluxcd/source-controller/pull/711) +- libgit2: Disable connection caching + [#713](https://github.com/fluxcd/source-controller/pull/713) +- Update dependencies + [#717](https://github.com/fluxcd/source-controller/pull/717) +- libgit2: enable managed transport by default + [#718](https://github.com/fluxcd/source-controller/pull/718) +- libgit2: Add support for hashed known_hosts + [#720](https://github.com/fluxcd/source-controller/pull/720) +- Remove dependency on libgit2 credentials callback + [#727](https://github.com/fluxcd/source-controller/pull/727) +- Update Alpine to v3.16 + [#731](https://github.com/fluxcd/source-controller/pull/731) +- Update dependencies + [#739](https://github.com/fluxcd/source-controller/pull/739) +- libgit2: enforce context timeout + [#740](https://github.com/fluxcd/source-controller/pull/740) +- libgit2: Pass ctx to all the transport opts + [#743](https://github.com/fluxcd/source-controller/pull/743) + +Fixes: +- Ensure git status is checked at the correct time + [#575](https://github.com/fluxcd/source-controller/pull/575) +- libgit2: recover from git2go panic + [#707](https://github.com/fluxcd/source-controller/pull/707) +- Remove minio region + [#715](https://github.com/fluxcd/source-controller/pull/715) +- GitRepositoryReconciler no-op clone improvements + [#724](https://github.com/fluxcd/source-controller/pull/724) 
+- Support dockerconfigjson with OCI HelmRepositories + [#725](https://github.com/fluxcd/source-controller/pull/725) +- log when the OCI temp credentials file can't be deleted + [#726](https://github.com/fluxcd/source-controller/pull/726) +- Helm reconcilers conditions and test improvements + [#728](https://github.com/fluxcd/source-controller/pull/728) +- reconcile: Set observed gen only when conditions exist + [#729](https://github.com/fluxcd/source-controller/pull/729) +- helmrepo: Fix test flake in type update test + [#730](https://github.com/fluxcd/source-controller/pull/730) +- Fix tests failing in Ubuntu + [#732](https://github.com/fluxcd/source-controller/pull/732) +- tests: ignore proxy settings when running tests + [#734](https://github.com/fluxcd/source-controller/pull/734) +- gitrepo: gitCheckout() return typed errors only + [#736](https://github.com/fluxcd/source-controller/pull/736) +- gitrepo: set conditions in gitCheckout + [#741](https://github.com/fluxcd/source-controller/pull/741) +- libgit2: Enable tests + [#744](https://github.com/fluxcd/source-controller/pull/744) +- OCI HelmRepo: handle status conditions in-line + [#748](https://github.com/fluxcd/source-controller/pull/748) +- registry: repo URL and dockerconfig URL mismatch + [#749](https://github.com/fluxcd/source-controller/pull/749) +- libgit2: fix checkout logic for CheckoutBranch + [#750](https://github.com/fluxcd/source-controller/pull/750) + +## 0.24.4 + +**Release date:** 2022-05-03 + +This prerelease comes with dependency updates, and improvements around edge-case +handling of reconcile result calculations ensuring an object is always requeued +when its Status Conditions equal `Ready=False` and `Stalled!=True`. 
+ +Improvements: +- summarize: Consider obj status condition in result + [#703](https://github.com/fluxcd/source-controller/pull/703) +- Update dependencies + [#705](https://github.com/fluxcd/source-controller/pull/705) + +Fixes: +- docs: Remove all traces of "identity.pub" from docs + [#699](https://github.com/fluxcd/source-controller/pull/699) +- test: use `T.TempDir` to create temporary test directory + [#701](https://github.com/fluxcd/source-controller/pull/701) +- Switch to gen-crd-api-reference-docs from master + [#702](https://github.com/fluxcd/source-controller/pull/702) + +## 0.24.3 + +**Release date:** 2022-04-28 + +This prerelease prevents `Reconciling` and `ArtifactOutdated` conditions from +being set on a `HelmRepository` when the checksum of a cached repository index +changes. + +Fixes: +- helmrepo: same revision different checksum scenario + [#691](https://github.com/fluxcd/source-controller/pull/691) + +## 0.24.2 + +**Release date:** 2022-04-26 + +This prerelease improves the memory consumption while reconciling a +`HelmRepository`, by only validating the YAML of a fetched index when the +checksum of the retrieved file differs from the current Artifact. + +Fixes: +- helm: optimise repository index loading + [#685](https://github.com/fluxcd/source-controller/pull/685) +- tests: Fix flakiness of git related tests + [#686](https://github.com/fluxcd/source-controller/pull/686) + +## 0.24.1 + +**Release date:** 2022-04-22 + +This prerelease fixes a regression bug where the controller would panic in +further to be identified edge-case scenarios in which a `HelmRepository` +Artifact would not have a size. 
+ +Fixes: +- Fix panic when HelmRepository's artifact size is nil + [#683](https://github.com/fluxcd/source-controller/pull/683) + +## 0.24.0 + +**Release date:** 2022-04-19 + +This prerelease enables the Helm chart dependency manager to make use of the +opt-in memory cache introduced in `v0.23.0`, revises the file permissions set +by the controller, and updates various dependencies. + +:warning: **Breaking change:** From this release on, the `RUNTIME_NAMESPACE` +environment variable is no longer taken into account to configure the +advertised HTTP/S address of the storage. Instead, [variable +substitution](https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/#define-an-environment-dependent-variable-for-a-container) +must be used, as described in [the changelog entry for `v0.5.2`](#052). + +Improvements: +- Change all file permissions to octal format + [#653](https://github.com/fluxcd/source-controller/pull/653) +- Enable dependency manager to use in-memory cache + [#667](https://github.com/fluxcd/source-controller/pull/667) +- Update libgit2 image to v1.3.1 + [#671](https://github.com/fluxcd/source-controller/pull/671) +- Remove hostname hyphen split block + [#672](https://github.com/fluxcd/source-controller/pull/672) +- Update dependencies + [#675](https://github.com/fluxcd/source-controller/pull/675) + [#676](https://github.com/fluxcd/source-controller/pull/676) + [#677](https://github.com/fluxcd/source-controller/pull/677) + +## 0.23.0 + +**Release date:** 2022-04-12 + +This prerelease introduces new retention options for Garbage Collection, +a new opt-in in-memory cache for `HelmRepository` index files, improves +notifications following reconciling failures, brings ways to configure +Key Exchange Algorithms, plus some extra housekeeping awesomeness. 
+
+Garbage Collection is enabled by default, and now its retention options
+are configurable with the flags: `--artifact-retention-ttl` (default: `60s`)
+and `--artifact-retention-records` (default: `2`). They define the minimum
+time to live and the maximum amount of artifacts to survive a collection.
+
+A new notification is now emitted to identify recovery from failures. It
+is triggered when a failed reconciliation is followed by a successful one, and
+the notification message is the same as the one sent in the usual successful
+source reconciliation message about the stored artifact.
+
+The opt-in in-memory cache for `HelmRepository` addresses issues where the
+index file is loaded and unmarshalled in concurrent reconciliation resulting
+in a heavy memory footprint. It can be configured using the flags:
+`--helm-cache-max-size`, `--helm-cache-ttl`, `--helm-cache-purge-interval`.
+
+The Key Exchange Algorithms used when establishing SSH connections are
+based on the defaults configured upstream in `go-git` and `golang.org/x/crypto`.
+Now this can be overridden with the flag `--ssh-kex-algos`. Note this applies
+to the `go-git` gitImplementation or the `libgit2` gitImplementation but
+_only_ when Managed Transport is being used.
+
+Managed Transport for `libgit2` now introduces self-healing capabilities,
+to recover from failure when long-running connections become stale.
+
+The exponential back-off retry can be configured with the new flags:
+`--min-retry-delay` (default: `750ms`) and `--max-retry-delay`
+(default: `15min`). Previously the defaults were set to `5ms` and `1000s`,
+which in some cases impaired the controller's ability to self-heal
+(e.g. retrying failing SSH connections).
+
+
+Introduction of a secure directory loader which improves the handling
+of Helm chart paths. 
+ +Improvements: +- update toolkit.fluxcd.io docs links + [#651](https://github.com/fluxcd/source-controller/pull/651) +- Add optional in-memory cache of HelmRepository index files + [#626](https://github.com/fluxcd/source-controller/pull/626) +- Add flag to allow configuration of SSH kex algos + [#655](https://github.com/fluxcd/source-controller/pull/655) +- Garbage collect with provided retention options + [#638](https://github.com/fluxcd/source-controller/pull/638) +- Avoid event logging GC failure + [#659](https://github.com/fluxcd/source-controller/pull/659) +- Add notify() in all the reconcilers + [#624](https://github.com/fluxcd/source-controller/pull/624) +- Remove leftover timeout in reconcilers + [#660](https://github.com/fluxcd/source-controller/pull/660) +- libgit2: managed transport improvements + [#658](https://github.com/fluxcd/source-controller/pull/658) +- helm: introduce customized chart loaders + [#663](https://github.com/fluxcd/source-controller/pull/663) +- Add flags to configure exponential back-off retry + [#664](https://github.com/fluxcd/source-controller/pull/664) + +## 0.22.5 + +**Release date:** 2022-03-30 + +This prerelease improves the Status API of the Source objects to +reflect more accurate Status Condition information. + +In addition, it also fixes a bug in `go-git` implementation due to which cloning +public Git repository failed without any credentials since version `0.17.0`, and +some general stability improvements in the libgit2 experimental managed +transport. 
+ +Improvements: +- Align fuzzing deps + [#644](https://github.com/fluxcd/source-controller/pull/644) +- Separate positive polarity conditions for ArtifactInStorage + [#646](https://github.com/fluxcd/source-controller/pull/646) +- Removes empty credentials from Basic Auth + [#648](https://github.com/fluxcd/source-controller/pull/648) +- libgit2: fix access to nil t.stdin and improve observability + [#649](https://github.com/fluxcd/source-controller/pull/649) + +## 0.22.4 + +**Release date:** 2022-03-28 + +This prerelease improves on the experimental managed transport overall +stability. Changes of note: +- SSH connections now being reused across git operations. +- Leaked HTTP connections are now fixed. +- The long-standing SSH intermittent errors are addressed by the cached connections. + +Fixes: +- Various fixes for managed transport + [#637](https://github.com/fluxcd/source-controller/pull/637) + +## 0.22.3 + +**Release date:** 2022-03-25 + +This prerelease fixes a regression bug introduced in `v0.22.0`, which would +cause a `GitRepository` to end up in a `Stalled` state if an include did not +have an Artifact available. + +Fixes: +- gitrepo: Do not stall when no included artifact + [#639](https://github.com/fluxcd/source-controller/pull/639) +- Fix dpanic issue when logging odd number of args + [#641](https://github.com/fluxcd/source-controller/pull/641) + +## 0.22.2 + +**Release date:** 2022-03-23 + +This prerelease ensures (Kubernetes) Event annotations are prefixed with the +FQDN of the Source API Group. For example, `revision` is now +`source.toolkit.fluxcd.io/revision`. + +This to facilitate improvements to the notification-controller, where +annotations prefixed with the FQDN of the Group of the Involved Object will be +transformed into "fields". 
+ +Fixes: +- Prefix event annotations with API Group FQDN + [#632](https://github.com/fluxcd/source-controller/pull/632) + +## 0.22.1 + +**Release date:** 2022-03-23 + +This prerelease fixes a regression in which `.sourceignore` rules for a +`GitRepository` would not be matched correctly. + +Fixes: +- fix: configure domain for .sourceignore rules + [#629](https://github.com/fluxcd/source-controller/pull/629) + +## 0.22.0 + +**Release date:** 2022-03-17 + +This prerelease comes with new reconcilers which make use of `fluxcd/pkg` +utilities for common runtime operations, and graduates the API to `v1beta2`. + +:warning: **It is required** to update the source-controller Custom Resource +Definitions on your cluster and/or in Git. + +### Breaking changes + +- `Bucket` resources do now take the provided etag for object storage items + into account during the calculation of the revision. As a result, items will + no longer be downloaded on every reconcile if none of them have changed. +- `HelmChart` resources do now advertise the observed chart name + (`.status.observedChartName`) and Source (reference) Artifact revision + (`.status.observedSourceArtifactRevision`) in the Status. The information is + used to more efficiently react to source revision and/or chart changes. + +### Features and Improvements + +#### API specifications in a user-friendly format + +[The new specifications for the `v1beta2` API](https://github.com/fluxcd/source-controller/tree/v0.22.0/docs/spec/v1beta2) +have been written in a new format with the aim to be more valuable to a user. +Featuring separate sections with examples, and information on how to write +and work with them. + +#### Artifact now advertises size + +The size (in bytes) of a tarball Artifact is now advertised in the Size +(`.size`) field of the Artifact. This can be utilized by users to e.g. quickly +see if `.sourceignore` rules have an effect, or be displayed in a UI. 
+ +#### Azure Blob Storage support for `Bucket` resources + +The `.spec.provider` of a `Bucket` resource can now be set to `azure` to +instruct the controller to use the +[Azure Blob Storage SDK](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) +while fetching objects. This allows for authenticating using Service +Principals, Managed Identities and Shared Keys. + +For more information, see the +[Bucket spec about the Azure provider](https://github.com/fluxcd/source-controller/blob/v0.22.0/docs/spec/v1beta2/buckets.md#azure). + +#### Enhanced Kubernetes Conditions + +Source API resources will now advertise more explicit Condition types (more +about the types in "API changes"), provide `Reconciling` and `Stalled` +Conditions where applicable for +[better integration with `kstatus`](https://github.com/kubernetes-sigs/cli-utils/blob/master/pkg/kstatus/README.md#conditions), +and record the Observed Generation on the Condition. + +For a detailed overview per Source type, refer to the spec: + +- [GitRepository](https://github.com/fluxcd/source-controller/blob/v0.22.0/docs/spec/v1beta2/gitrepositories.md#conditions) +- [HelmRepository](https://github.com/fluxcd/source-controller/blob/v0.22.0/docs/spec/v1beta2/helmrepositories.md#conditions) +- [HelmChart](https://github.com/fluxcd/source-controller/blob/v0.22.0/docs/spec/v1beta2/helmcharts.md#conditions) +- [Bucket](https://github.com/fluxcd/source-controller/blob/v0.22.0/docs/spec/v1beta2/buckets.md#conditions) + +#### Enhanced Kubernetes Events (and notifications) + +The Kubernetes Events the controller emits have been reworked to provide a +proper reason, and more informative messages. +Users making use of the notification-controller will notice this as well, as +this same information is used to compose notifications. 
+ +#### Experimental managed transport for `libgit2` Git implementation + +The `libgit2` Git implementation supports a new experimental transport to +improve reliability, adding timeout enforcement for Git network operations. +Opt-in by setting the environment variable `EXPERIMENTAL_GIT_TRANSPORT` to +`true` in the controller's Deployment. This will result in the low-level +transport being handled by the controller, instead of `libgit2`. It may result +in an increased number of timeout messages in the logs, however it will remove +the ability of Git operations to make the controllers hang indefinitely. + +#### Reuse of HTTP/S transport for Helm repository index and chart downloads + +The Helm dependency has been updated to `v3.8.1`, with a patch applied from +https://github.com/helm/helm/pull/10568. Using this patch, the HTTP transports +are now managed by the controller, to prevent the clogging of thousands of open +TCP connections on some instances. + +#### Update of `libgit2` Git implementation to `v1.3.x` + +The `libgit2` Git implementation has been updated to `v1.3.x`, allowing us to +provide better error signals for authentication, certificate and transport +failures. Effectively, this means that instead of a `unable to clone: User` +error string, a descriptive one is now given. + +In addition, `NO_PROXY` settings are now properly taken into account. + +#### Preparation of support for `rsa-ssh2-256/512` + +The dependency on `golang.org/x/crypto` has been updated to +`v0.0.0-20220315160706-3147a52a75dd`, as preparation of support for +`rsa-ssh2-256/512`. This should theoretically work out of the box for +`known_hosts` entries and `go-git` Git provider credentials, but has not been +widely tested. + +### API changes + +The `source.toolkit.fluxcd.io/v1beta2` API is backwards compatible with `v1beta1`. 
+ +- Introduction of `Reconciling` and `Stalled` Condition types for [better + integration with `kstatus`](https://github.com/kubernetes-sigs/cli-utils/blob/master/pkg/kstatus/README.md#conditions). +- Introduction of new Condition types to provide better signals and failure + indications: + * `ArtifactOutdated`: indicates the current Artifact of the Source is outdated. + * `SourceVerified`: indicates the integrity of the Source has been verified. + * `FetchFailed`: indicates a transient or persistent fetch failure of the + upstream Source. + * `BuildFailed`: indicates a transient or persistent build failure of a + Source's Artifact. + * `StorageOperationFailed`: indicates a transient or persistent failure + related to storage. + * `IncludeUnavailable`: indicates an include is not available. For example, + because it does not exist, or does not have an Artifact. +- Introduction of a Size (in bytes) field (`.status.artifact.size`) in the + Artifact object. +- Introduction of `ObservedChartName` (`.status.observedChartName`) and + `ObservedSourceArtifactRevision` (`.status.observedSourceArtifactRevision`) + fields in the `HelmChart` Status. +- Introduction of `azure` provider implementation for `Bucket`. + +Updating the manifests in Git to `v1beta2` can be done at any time after the +source-controller upgrade. 
+ +### Full list of changes + +- Upgrade to golang-with-libgit2:1.1.1.6 and use static libraries for in + development + [#562](https://github.com/fluxcd/source-controller/pull/562) +- Initial fuzzing tests + [#572](https://github.com/fluxcd/source-controller/pull/572) +- Validate libgit2 args are set correctly + [#574](https://github.com/fluxcd/source-controller/pull/574) +- Download libgit2 libraries for fuzzing + [#572](https://github.com/fluxcd/source-controller/pull/577) +- Upgrade libgit2 to 1.3.0 and git2go to v33 + [#573](https://github.com/fluxcd/source-controller/pull/573) +- pkg/git: Include commit message and URL in checkout error + [#579](https://github.com/fluxcd/source-controller/pull/579) +- Add support for multiple fuzz sanitizers + [#580](https://github.com/fluxcd/source-controller/pull/580) +- Upgrade controller-runtime to v0.11.1 and docker/distribution to v2.8.0 + [#583](https://github.com/fluxcd/source-controller/pull/583) +- Move to `v1beta2` API and rewrite reconcilers + [#586](https://github.com/fluxcd/source-controller/pull/586) +- git/libgit2: set CheckoutForce on branch strategy + [#589](https://github.com/fluxcd/source-controller/pull/589) +- Reuse transport for Helm downloads + [#590](https://github.com/fluxcd/source-controller/pull/590) +- Update object API version in the sample configs + [#591](https://github.com/fluxcd/source-controller/pull/591) +- api: Move Status in CRD printcolumn to the end + [#592](https://github.com/fluxcd/source-controller/pull/592) +- Update github.com/sosedoff/gitkit to v0.3.0 (CVE fix) + [#594](https://github.com/fluxcd/source-controller/pull/594) +- Remove redundant reconciling condition in reconcileArtifact + [#595](https://github.com/fluxcd/source-controller/pull/595) +- Implement 
Size field on archived artifacts + [#597](https://github.com/fluxcd/source-controller/pull/597) +- Add native Azure Blob support + [#598](https://github.com/fluxcd/source-controller/pull/598) +- Experimental managed transport for libgit2 operations + [#606](https://github.com/fluxcd/source-controller/pull/606) +- Update Helm to patched v3.8.1 + [#609](https://github.com/fluxcd/source-controller/pull/609) +- Add new condition StorageOperationFailedCondition + [#612](https://github.com/fluxcd/source-controller/pull/612) +- Prioritize StorageOperationFailedCondition + [#613](https://github.com/fluxcd/source-controller/pull/613) +- Update dependencies + [#600](https://github.com/fluxcd/source-controller/pull/600) + [#616](https://github.com/fluxcd/source-controller/pull/616) +- api/v1beta2: add note on Condition polarity + [#622](https://github.com/fluxcd/source-controller/pull/622) + +## 0.21.2 + +**Release date:** 2022-02-07 + +This prerelease changes the default timeout of `GitRepositories` and `Buckets` from `20s` to `60s`. +When using the `libgit2` Git implementation, increasing the timeout helps avoid +`Error waiting on socket` intermittent SSH cloning failures. + +Improvements: +- Increase default timeout to 60s + [#570](https://github.com/fluxcd/source-controller/pull/570) + +## 0.21.1 + +**Release date:** 2022-01-27 + +This prerelease comes with a bug fix to ensure the `libgit2` Git implementation +respects hostnames with and without port while matching against `known_hosts`. + +Fixes: +- Fix host mismatch in libgit2 + [#561](https://github.com/fluxcd/source-controller/pull/561) + +## 0.21.0 + +**Release date:** 2022-01-26 + +This prerelease comes with changes to the base image used to build and +run the controller, replacing Debian Unstable (Sid) with Alpine 3.15. 
+The controller is now statically built and includes libgit2 along with +its main dependencies. + +The controller container images are signed with +[Cosign and GitHub OIDC](https://github.com/sigstore/cosign/blob/22007e56aee419ae361c9f021869a30e9ae7be03/KEYLESS.md), +and a Software Bill of Materials in [SPDX format](https://spdx.dev) has been published on the release page. + +Starting with this version, the controller deployment conforms to the +Kubernetes [restricted pod security standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted): +- all Linux capabilities were dropped +- the root filesystem was set to read-only +- the seccomp profile was set to the runtime default +- run as non-root was enabled +- the filesystem group was set to 1337 +- the user and group ID was set to 65534 + +**Breaking changes**: +- The use of new seccomp API requires Kubernetes 1.19. +- The controller container is now executed under 65534:65534 (userid:groupid). + This change may break deployments that hard-coded the user ID of 'controller' in their PodSecurityPolicy. 
+ +Improvements: +- Statically build using musl toolchain and target alpine + [#558](https://github.com/fluxcd/source-controller/pull/558) +- Publish SBOM and sign release artifacts + [#550](https://github.com/fluxcd/source-controller/pull/550) +- security: Drop capabilities, set userid and enable seccomp + [#521](https://github.com/fluxcd/source-controller/pull/521) +- docs: Add git proxy support docs + [#547](https://github.com/fluxcd/source-controller/pull/547) +- libgit2: Configured libgit2 clone ProxyOptions + [#524](https://github.com/fluxcd/source-controller/pull/524) +- storage: include directories in artifact tarball + [#543](https://github.com/fluxcd/source-controller/pull/543) +- Add Permissions to GitHub Workflows + [#551](https://github.com/fluxcd/source-controller/pull/551) +- Update git2go to v31.7.6 + [#554](https://github.com/fluxcd/source-controller/pull/554) +- Update dev docs + [#555](https://github.com/fluxcd/source-controller/pull/555) + +Fixes: +- e2e: Set timeout to fix intermittent errors + [#549](https://github.com/fluxcd/source-controller/pull/549) +- git/libgit2: Fix failing tests when the default branch is not "master" + [#545](https://github.com/fluxcd/source-controller/pull/545) +- Remove temp file name from Helm index cache err + [#540](https://github.com/fluxcd/source-controller/pull/540) +- Fix makefile envtest and controller-gen usage + [#539](https://github.com/fluxcd/source-controller/pull/539) +- Update file close operation to not use defer and add test case for CopyFromPath + [#538](https://github.com/fluxcd/source-controller/pull/538) +- Fix the missing protocol for the first port in manager config + [#556](https://github.com/fluxcd/source-controller/pull/556) + +## 0.20.1 + +**Release date:** 2022-01-07 + +This 
prerelease comes with an update for `github.com/containerd/containerd` to `v1.5.9` +to please static security analysers and fix any warnings for CVE-2021-43816. + +Improvements: +- Log the error when tmp cleanup fails + [#533](https://github.com/fluxcd/source-controller/pull/533) +- Update containerd to v1.5.9 (fix CVE-2021-43816) + [#532](https://github.com/fluxcd/source-controller/pull/532) + +## 0.20.0 + +**Release date:** 2022-01-05 + +This prerelease comes with an update to the Kubernetes and controller-runtime dependencies +to align them with the Kubernetes 1.23 release, including an update of Helm to `v3.7.2`. + +In addition, the controller is now built with Go 1.17, and +`github.com/containerd/containerd` was updated to `v1.5.8` to please +static security analysers and fix any warnings for GHSA-5j5w-g665-5m35. + +Improvements: +- Update Go to v1.17 + [#473](https://github.com/fluxcd/source-controller/pull/473) +- Update build dependencies + [#520](https://github.com/fluxcd/source-controller/pull/520) +- Update containerd to v1.5.8 (fix GHSA-5j5w-g665-5m35) + [#529](https://github.com/fluxcd/source-controller/pull/529) + +## 0.19.2 + +**Release date:** 2021-12-09 + +This prerelease ensures the API resources are not prematurely marked as `Ready` +by tools like `kstatus`, while the controller has not observed a newly created +resource yet, by defaulting the `ObservedGeneration` in the status of the +resource to `-1`. + +In addition, it changes the faulty `URL` column for `Bucket` resources to +`Endpoint`, and updates `github.com/opencontainers/runc` to `v1.0.3` to please +static security analysers and fix any warnings for CVE-2021-43784. 
+ +Improvements: +- crds: set default observedGeneration to -1 + [#517](https://github.com/fluxcd/source-controller/pull/517) +- Update github.com/opencontainers/runc to v1.0.3 (fix CVE-2021-43784) + [#518](https://github.com/fluxcd/source-controller/pull/518) + +Fixes: +- Change bucket JSONPath from URL to endpoint + [#514](https://github.com/fluxcd/source-controller/pull/514) + +## 0.19.1 + +**Release date:** 2021-12-03 + +This prerelease changes the length of the SHA hex added to the SemVer metadata +of a `HelmChart`, when `ReconcileStrategy` is set to `Revision`, to a short SHA +hex of the first 12 characters. This is to prevent situations in which the +SemVer would exceed the length limit of 63 characters when utilized in a Helm +chart as a label value. + +Concrete example: `1.2.3+a4303ff0f6fb560ea032f9981c6bd7c7f146d083.1` becomes +`1.2.3+a4303ff0f6fb.1` + +:warning: There have been additional user reports about charts complaining +about a `+` character in the label: + +``` +metadata.labels: Invalid value: "1.2.3+a4303ff0f6fb560ea032f9981c6bd7c7f146d083.1": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?') +``` + +Given the [Helm chart best practices mention to replace this character with a +`_`](https://helm.sh/docs/chart_best_practices/conventions/#version-numbers), +we encourage you to patch this in your (upstream) chart. 
+Pseudo example using [template functions](https://helm.sh/docs/chart_template_guide/function_list/):
+
+```yaml
+{{- replace "+" "_" .Chart.Version | trunc 63 }}
+```
+
+Fixes:
+- controllers: use short SHA in chart SemVer meta
+  [#507](https://github.com/fluxcd/source-controller/pull/507)
+
+## 0.19.0
+
+**Release date:** 2021-11-23
+
+For this prerelease we focused on improving the logic around Helm resources,
+with the goal of being more efficient, and increasing code and testing quality.
+
+It contains **breaking behavioral changes** to `HelmRepository` and
+`HelmChart` resources:
+
+- Helm repository index files and/or charts **must** not exceed the new declared
+  runtime default limits to [avoid out-of-memory crashes](https://github.com/fluxcd/source-controller/issues/470),
+  overwriting the default configuration is possible.
+
+  | Type | Default max size **(in MiB)** | Option flag to overwrite |
+  |---|---|---|
+  | Helm repository index | 50MiB | `--helm-index-max-size=` |
+  | Helm chart | 10MiB | `--helm-chart-max-size=` |
+  | Single file from Helm chart | 5MiB | `--helm-chart-file-max-size=` |
+
+- Using `ValuesFiles` in a `HelmChart` will now append a `.` to the SemVer
+  metadata of the packaged chart and the revision of the Artifact. For example,
+  `v1.2.3+.5` for a `HelmChart` resource with generation `5`. This ensures consumers
+  of the chart are able to notice changes to the merged values without the underlying
+  chart source (revision) changing.
+
+While an optional ACL field has been added to the API resources, there is no
+implementation at time of release. 
+ +Improvements: +- helm: factor out logic from controller into package + [#485](https://github.com/fluxcd/source-controller/pull/485) +- Add ACL option field to Source API + [#495](https://github.com/fluxcd/source-controller/pull/495) +- Update various dependencies to mitigate CVE warning + [#493](https://github.com/fluxcd/source-controller/pull/493) +- Update controller-runtime to v0.10.2 + [#497](https://github.com/fluxcd/source-controller/pull/497) +- Update github.com/minio/minio-go to `v7.0.15` + [#498](https://github.com/fluxcd/source-controller/pull/498) +- internal/helm: LoadChartMetadataFromArchive improvements + [#502](https://github.com/fluxcd/source-controller/pull/502) +- internal/helm: validate loaded chart metadata obj + [#503](https://github.com/fluxcd/source-controller/pull/503) + +Fixes: +- tests: ensure proper garbage collection + [#489](https://github.com/fluxcd/source-controller/pull/489) +- controllers: Fix helmchart values file merge test + [#494](https://github.com/fluxcd/source-controller/pull/494) +- Update test shield link + [#496](https://github.com/fluxcd/source-controller/pull/496) +- controllers: absolute local path for cached chart + [#500](https://github.com/fluxcd/source-controller/pull/500) +- Various small fixes across the code base + [#501](https://github.com/fluxcd/source-controller/pull/501) + +## 0.18.0 + +**Release date:** 2021-11-12 + +This prerelease changes the format of the artifact checksum from `SHA1` to `SHA256` +to mitigate chosen-prefix and length extension attacks. 
+ +Improvements: +* storage: change Artifact checksum to SHA256 + [#487](https://github.com/fluxcd/source-controller/pull/487) + +## 0.17.2 + +**Release date:** 2021-11-04 + +This prerelease comes with a bug fix to ensure the `libgit2` Git implementation +respects the operation `timeout` specified in `GitRepositorySpec`. + +Fixes: +* libgit2: ensure context timeout cancels transfer + [#477](https://github.com/fluxcd/source-controller/pull/477) + +## 0.17.1 + +**Release date:** 2021-10-30 + +This prerelease fixes a pointer error that was returned in v0.17.0 during +the import of public keys to verify a commit. + +Fixes: +* Fix pointer error during public key import + [#479](https://github.com/fluxcd/source-controller/pull/479) + +## 0.17.0 + +**Release date:** 2021-10-28 + +For this prerelease we focused on further improving the Git implementations, partly +to increase stability and test coverage, partly to ensure they are prepared to be +moved out into a separate module. With this work, it is now possible to define just +a Git commit as a reference, which will result in an `Artifact` with a `Revision` +format of `HEAD/`. + +For the `go-git` implementation, defining the branch and a commit reference will +result in a more efficient shallow clone, and using this information when it is +available to you is therefore encouraged. + +Improvements: +* git: refactor authentication, checkout and verification + [#462](https://github.com/fluxcd/source-controller/pull/462) + +Fixes: +* libgit2: handle EOF in parseKnownHosts() + [#475](https://github.com/fluxcd/source-controller/pull/475) + +## 0.16.1 + +**Release date:** 2021-10-22 + +This prerelease adds support for GCP storage authentication using the +`GOOGLE_APPLICATION_CREDENTIALS` environment variable available in the container, +or by defining a `secretRef` with a `serviceaccount` JSON data blob. 
See
+[#434](https://github.com/fluxcd/source-controller/pull/434) for more information.
+
+In addition, several bug fixes and improvements have been made to the `libgit2`
+Git implementation, ensuring the checkout logic is more rigorously tested.
+
+During this work, it was discovered that both Git implementations had a minor
+bug resulting in `v` prefixed tags with metadata added to them (e.g.
+`v0.1.0+build-1` and `v0.1.0+build-2`) not being properly sorted by their
+commit timestamp, which has been addressed as well.
+
+Improvements:
+* Add GCP storage authentication
+  [#434](https://github.com/fluxcd/source-controller/pull/434)
+
+Fixes:
+* libgit2: correctly resolve (annotated) tags
+  [#457](https://github.com/fluxcd/source-controller/pull/457)
+* libgit2: add remaining checkout strategy tests
+  [#458](https://github.com/fluxcd/source-controller/pull/458)
+* git: ensure original tag is used for TS lookup
+  [#459](https://github.com/fluxcd/source-controller/pull/459)
+
+## 0.16.0
+
+**Release date:** 2021-10-08
+
+This prerelease improves the configuration of the `libgit2` C library, solving
+most issues around private key formats (e.g. PKCS#8 and ED25519) by ensuring
+it is linked against OpenSSL and LibSSH2.
+
+In addition, the `HelmChart` resource does now allow setting a `ReconcileStrategy`
+to define when a new artifact for a chart should be created for charts from
+`Bucket` and `GitRepository` sources. By setting this to `Revision`, you no
+longer have to bump the version in the `Chart.yaml` file, but a new chart will
+automatically be made available when the revision of the Source changes.
+ +Fixes: +* Update containerd and runc to fix CVEs + [#446](https://github.com/fluxcd/source-controller/pull/446) + +Improvements: +* Add reconcile strategy for HelmCharts + [#308](https://github.com/fluxcd/source-controller/pull/308) +* Update github.com/libgit2/git2go to v31.6.1 + [#437](https://github.com/fluxcd/source-controller/pull/437) + +## 0.15.4 + +**Release date:** 2021-08-05 + +This prerelease comes with a series of bug fixes, and updates the Kubernetes +dependencies to `v1.21.3` and Helm to `v3.6.3`. + +Fixes: +* Fix tag checkout with libgit2 + [#394](https://github.com/fluxcd/source-controller/pull/394) +* Take relative paths in account for Bucket revision + [#403](https://github.com/fluxcd/source-controller/pull/403) +* Ensure rel path never traverses outside Storage + [#417](https://github.com/fluxcd/source-controller/pull/417) +* Use same SemVer logic in both Git implementations + [#417](https://github.com/fluxcd/source-controller/pull/417) +* storage: strip env specific data during archive + [#417](https://github.com/fluxcd/source-controller/pull/417) + +Improvements: +* e2e: Update Kubernetes to v1.21 + [#396](https://github.com/fluxcd/source-controller/pull/396) +* Update Helm to v3.6.3 + [#400](https://github.com/fluxcd/source-controller/pull/400) +* Add setup-envtest in Makefile + [#404](https://github.com/fluxcd/source-controller/pull/404) +* Use ObjectKeyFromObject instead of ObjectKey + [#405](https://github.com/fluxcd/source-controller/pull/405) +* Drop deprecated `io/ioutil` + [#409](https://github.com/fluxcd/source-controller/pull/409) +* Update dependencies + [#416](https://github.com/fluxcd/source-controller/pull/416) + +## 0.15.3 + +**Release date:** 2021-06-29 + +This prerelease comes with a bug fix to the Git tag checkout 
when using `libgit2`. + +Fixes: +* Fix tag checkout with libgit2 + [#394](https://github.com/fluxcd/source-controller/pull/394) + +## 0.15.2 + +**Release date:** 2021-06-22 + +This prerelease updates the build constraints for `libgit2`, ensuring +the underlying `libssh2-1-dev` dependency is linked against +`libssl-dev` instead of `libgcrypt` so that PKCS* private keys can +be used without any issues. + +Fixes: +* Use libgit2 from "unstable" / "sid" + [#391](https://github.com/fluxcd/source-controller/pull/391) + +## 0.15.1 + +**Release date:** 2021-06-18 + +This prerelease updates the Helm dependency to `v3.6.1`, this update +is a security update and ensures credentials are only passed to the +defined URL in a `HelmRelease`. + +**Note:** there have been reports from the Helm user community that +this new behavior may cause issues with Helm repository providers +like Artifactory. If this happens to be a problem for you, the +behavior can be disabled by setting `PassCredentials` in the +`HelmRepositorySpec`. + +For more details, see: +https://github.com/helm/helm/security/advisories/GHSA-56hp-xqp3-w2jf + +Improvements: +* Update Helm to v3.6.1 + [#388](https://github.com/fluxcd/source-controller/pull/388) + +## 0.15.0 + +**Release date:** 2021-06-17 + +This prerelease comes with changes to the base image used to build +the controller, replacing Alpine with Debian slim. This change +allows the controller to run on ARM64, previously broken in v0.14.0. 
+ +Improvements: +* Use Debian instead of Alpine for multi-arch builds + [#386](https://github.com/fluxcd/source-controller/pull/386) +* Panic on non-nil AddToScheme errors in main init + [#387](https://github.com/fluxcd/source-controller/pull/387) + +## 0.14.0 + +**Release date:** 2021-06-09 + +This prerelease comes with an update to the Kubernetes and controller-runtime +dependencies to align them with the Kubernetes 1.21 release, including an update +of Helm to `v3.6.0`. + +After a failed ARMv7 build during the initial release attempt of this version, +`binutils-gold` has been introduced to the `builder` image [to allow `gccgo` to +build using the Gold linker](https://golang.org/doc/install/gccgo#Gold). + +Improvements: +* Update K8s, controller-runtime and fluxcd/pkg deps + [#374](https://github.com/fluxcd/source-controller/pull/374) +* Add nightly builds workflow + [#376](https://github.com/fluxcd/source-controller/pull/376) + +Fixes: +* Reinstate Git cloning timeout + [#372](https://github.com/fluxcd/source-controller/pull/372) +* Use `binutils-gold` in builder image + [#377](https://github.com/fluxcd/source-controller/pull/377) +* Use `github.repository` property for image name + [#378](https://github.com/fluxcd/source-controller/pull/378) + +## 0.13.2 + +**Release date:** 2021-06-02 + +This prerelease comes with an update to the `go-git` implementation +dependency, bumping the version to `v5.4.2`. This should resolve any +issues with `object not found` and `empty git-upload-pack given` +errors that were thrown for some Git repositories since `0.13.0`. + +Fixes: +* Update go-git to v5.4.2 + [#370](https://github.com/fluxcd/source-controller/pull/370) + +## 0.13.1 + +**Release date:** 2021-05-28 + +This prerelease comes with a bug fix to the `GitRepository` include feature. 
+ +Fixes: +* Fix GitRepository include for nested paths + [#367](https://github.com/fluxcd/source-controller/pull/367) + +## 0.13.0 + +**Release date:** 2021-05-26 + +This prerelease comes with support for including the contents of a Git repository into another. + +The [include feature](https://github.com/fluxcd/source-controller/blob/api/v0.13.0/docs/spec/v1beta1/gitrepositories.md#including-gitrepository) +has multiple benefits over regular Git submodules: + +* Including a `GitRepository` allows you to use different authentication methods for different repositories. +* A change in the included repository will trigger an update of the including repository. +* Multiple `GitRepositories` could include the same repository, which decreases the amount of cloning done compared to using submodules. + +Features: +* Add include property to GitRepositories + [#348](https://github.com/fluxcd/source-controller/pull/348) + +Improvements: +* Update Git packages + [#365](https://github.com/fluxcd/source-controller/pull/365) + +## 0.12.2 + +**Release date:** 2021-05-10 + +This prerelease comes with a bug fix to `Bucket` source ignore +handling. + +Fixes: +* Split bucket item key by `/` to satisfy matcher + [#356](https://github.com/fluxcd/source-controller/pull/356) + +## 0.12.1 + +**Release date:** 2021-04-23 + +This prerelease comes with a bug fix to source ignore handling. + +Fixes: +* Configure ignore domain for GitRepository rules + [#351](https://github.com/fluxcd/source-controller/pull/351) + +## 0.12.0 + +**Release date:** 2021-04-21 + +This prerelease comes with support for SSH keys with a passphrase. + +The `.sourceignore` files are now loaded by traversing through the directory tree, +instead of just looking at the root. + +The HelmChart `ValueFile` string field has been deprecated in favour of +`ValuesFiles` string array. 
+ +Features: +* Support SSH private key with password + [#338](https://github.com/fluxcd/source-controller/pull/338) + [#339](https://github.com/fluxcd/source-controller/pull/339) +* Add `ValuesFiles` to HelmChart spec + [#305](https://github.com/fluxcd/source-controller/pull/305) + +Improvements: +* Check ignore matches before Bucket item downloads + [#337](https://github.com/fluxcd/source-controller/pull/337) +* Add short name for Git and Helm repositories + [#334](https://github.com/fluxcd/source-controller/pull/334) +* Update Helm to v3.5.4 + [#340](https://github.com/fluxcd/source-controller/pull/340) + +Fixes: +* Write chart data on identical values overwrite + [#345](https://github.com/fluxcd/source-controller/pull/345) +* Fix HelmChart values tests + [#332](https://github.com/fluxcd/source-controller/pull/332) + +## 0.11.0 + +**Release date:** 2021-03-31 + +This prerelease comes with support for +[Git submodules](https://github.com/fluxcd/source-controller/blob/api/v0.11.0/docs/spec/v1beta1/gitrepositories.md#git-submodules) +and [self-signed TLS certs](https://github.com/fluxcd/source-controller/blob/api/v0.11.0/docs/spec/v1beta1/gitrepositories.md#https-self-signed-certificates) +when using `gitProvider: go-git`. + +Features: +* Add support for Git submodules with go-git + [#327](https://github.com/fluxcd/source-controller/pull/327) +* Enable self-signed certs for go-git + [#324](https://github.com/fluxcd/source-controller/pull/324) + +Improvements: +* Add well-known CI configs to exclusion list + [#329](https://github.com/fluxcd/source-controller/pull/329) + +## 0.10.0 + +**Release date:** 2021-03-26 + +This is the tenth MINOR prerelease. 
+ +This prerelease comes with a breaking change to the leader election ID +from `305740c0.fluxcd.io` to `source-controller-leader-election` +to be more descriptive. This change should not have an impact on most +installations, as the default replica count is `1`. If you are running +a setup with multiple replicas, it is however advised to scale down +before upgrading. + +The suspended status of resources is now recorded to a +`gotk_suspend_status` Prometheus gauge metric. + +Improvements: +* Record suspend metrics in controllers + [#311](https://github.com/fluxcd/source-controller/pull/311) +* Set leader election deadline to 30s + [#318](https://github.com/fluxcd/notification-controller/pull/318) +* Change leader election ID to be more descriptive + [#319](https://github.com/fluxcd/notification-controller/pull/319) + +## 0.9.1 + +**Release date:** 2021-03-15 + +This prerelease comes with improvements to Git clone errors and +patch updates to dependencies. + +Improvements: +* Tidy git clone errors + [#304](https://github.com/fluxcd/source-controller/pull/304) +* Update dependencies + [#307](https://github.com/fluxcd/source-controller/pull/307) + +## 0.9.0 + +**Release date:** 2021-02-23 + +This is the ninth MINOR prerelease. + +Due to changes in Helm [v3.5.2](https://github.com/helm/helm/releases/tag/v3.5.2), +charts not versioned using **strict semver** are no longer compatible with +source-controller. When using charts from Git, make sure that the `version` +field is set in `Chart.yaml`. + +Improvements: +* Update dependencies + [#299](https://github.com/fluxcd/source-controller/pull/299) +* Refactor release workflow + [#300](https://github.com/fluxcd/source-controller/pull/300) + +## 0.8.1 + +**Release date:** 2021-02-18 + +This prerelease fixes a bug where only one dependency of a Helm +chart would be included. 
+
+Fixes:
+* Copy loop iterator var for use by goroutine
+  [#294](https://github.com/fluxcd/source-controller/pull/294)
+
+## 0.8.0
+
+**Release date:** 2021-02-12
+
+This is the eighth MINOR prerelease.
+
+The `libgit2` Git implementation now has support for Certificate Authority
+validation for Git over HTTPS, as well as various bug fixes around working
+with SSH host key fingerprints.
+
+Alpine has been updated to `3.13`, making it possible to move away from `edge`
+for `libgit2` and `musl` dependencies.
+
+`pprof` endpoints have been enabled on the metrics server, making it easier to
+collect runtime information to, for example, debug performance issues.
+
+Features:
+* Add custom CA validation for Git over HTTPS
+  [#283](https://github.com/fluxcd/source-controller/pull/283)
+
+Improvements:
+* Rename Git packages to implementations
+  [#270](https://github.com/fluxcd/source-controller/pull/270)
+* Enable pprof endpoints on metrics server
+  [#282](https://github.com/fluxcd/source-controller/pull/282)
+* Add fsGroup to pod security context
+  [#285](https://github.com/fluxcd/source-controller/pull/285)
+* Use musl and libit2 packages from v3.13 branch
+  [#289](https://github.com/fluxcd/source-controller/pull/289)
+
+Fixes:
+* Fix chart with custom valuesFile (0bytes tgz)
+  [#286](https://github.com/fluxcd/source-controller/pull/286)
+* libgit2: use provided host to validate public key
+  [#288](https://github.com/fluxcd/source-controller/pull/288)
+* libgit2: check hostkey type when validating hostkey
+  [#290](https://github.com/fluxcd/source-controller/pull/290)
+
+## 0.7.4
+
+**Release date:** 2021-02-03
+
+This prerelease fixes a bug where the controller tried to update dependencies
+for Helm charts even when dependencies were already present.
+ +## 0.7.3 + +**Release date:** 2021-02-02 + +This prerelease changes the strategy of the controller's deployment to Recreate +to prevent a deadlock during upgrades and to ensure safe usage of backing +persistent (RW) volumes. + +## 0.7.2 + +**Release date:** 2021-02-01 + +This prerelease ensures the file server of the controller only starts for the +elected leader, and improves the visibility of chart name validation errors. + +## 0.7.1 + +**Release date:** 2021-01-25 + +This prerelease changes the recorded revision for a `HelmRepository` resource +to a SHA1 checksum, this to improve the detection of changes for repositories +that do not correctly update their advertised generation timestamp. + ## 0.7.0 **Release date:** 2021-01-21 @@ -369,7 +3220,7 @@ using the [notification.fluxcd.io API](https://github.com/fluxcd/notification-co **Release date:** 2020-06-24 This is the first prerelease ready for public testing. To get started -testing, see the [GitOps Toolkit guide](https://toolkit.fluxcd.io/get-started/). +testing, see the [GitOps Toolkit guide](https://fluxcd.io/flux/get-started/). ## 0.0.1-beta.2 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 29ce578b4..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,102 +0,0 @@ -# Contributing - -Source Controller is [Apache 2.0 licensed](LICENSE) and accepts contributions -via GitHub pull requests. This document outlines some of the conventions on -to make it easier to get your contribution accepted. - -We gratefully welcome improvements to issues and documentation as well as to -code. - -## Certificate of Origin - -By contributing to this project you agree to the Developer Certificate of -Origin (DCO). This document was created by the Linux Kernel community and is a -simple statement that you, as a contributor, have the legal right to make the -contribution. 
No action from you is required, but it's a good idea to see the -[DCO](DCO) file for details before you start contributing code to Source -Controller. - -## Communications - -The project uses Slack: To join the conversation, simply join the -[CNCF](https://slack.cncf.io/) Slack workspace and use the -[#flux](https://cloud-native.slack.com/messages/flux/) channel. - -The developers use a mailing list to discuss development as well. -Simply subscribe to [flux-dev on cncf.io](https://lists.cncf.io/g/cncf-flux-dev) -to join the conversation (this will also add an invitation to your -Google calendar for our [Flux -meeting](https://docs.google.com/document/d/1l_M0om0qUEN_NNiGgpqJ2tvsF2iioHkaARDeh6b70B0/edit#)). - -### Installing required dependencies - -The dependency [libgit2](https://libgit2.org/) needs to be installed to be able to run -Source Controller or its test-suite locally (not in a container). - -**macOS** -``` -brew install libgit2 -``` - -**Arch Linux** -``` -pacman -S libgit2 -``` - -**Building from source** - -1. Ensure [`cmake`](https://cmake.org) is available on your system. -1. Download and unarchive [the right `libgit2` version](https://github.com/libgit2/git2go#which-go-version-to-use) - for our current `git2go` dependency: - - ```console - $ LIBGIT2_VER=1.1.0 - $ curl -L https://github.com/libgit2/libgit2/releases/download/v$LIBGIT2_VER/libgit2-$LIBGIT2_VER.tar.gz -o /tmp/libgit2.tar.gz - $ tar -xvf /tmp/libgit2.tar.gz -C /tmp/libgit2-$LIBGIT2_VER - ``` -1. Build and install the library on your system: - - ```console - $ mkdir /tmp/libgit2-$LIBGIT2_VER/build && cd /tmp/libgit2-$LIBGIT2_VER/build - $ cmake .. -DCMAKE_INSTALL_PREFIX=/usr - $ sudo cmake --build . 
--target install - ``` - -### How to run the test suite - -You can run the unit tests by simply doing - -```bash -make test -``` - -## Acceptance policy - -These things will make a PR more likely to be accepted: - -- a well-described requirement -- tests for new code -- tests for old code! -- new code and tests follow the conventions in old code and tests -- a good commit message (see below) -- all code must abide [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) -- names should abide [What's in a name](https://talks.golang.org/2014/names.slide#1) -- code must build on both Linux and Darwin, via plain `go build` -- code should have appropriate test coverage and tests should be written - to work with `go test` - -In general, we will merge a PR once one maintainer has endorsed it. -For substantial changes, more people may become involved, and you might -get asked to resubmit the PR or divide the changes into more than one PR. - -### Format of the Commit Message - -For Source Controller we prefer the following rules for good commit messages: - -- Limit the subject to 50 characters and write as the continuation - of the sentence "If applied, this commit will ..." -- Explain what and why in the body, if more than a trivial change; - wrap it at 72 characters. - -The [following article](https://chris.beams.io/posts/git-commit/#seven-rules) -has some more helpful advice on documenting your work. diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md new file mode 100644 index 000000000..11d05ad83 --- /dev/null +++ b/DEVELOPMENT.md @@ -0,0 +1,139 @@ +# Development + +> **Note:** Please take a look at +> to find out about how to contribute to Flux and how to interact with the +> Flux Development team. 
+ +## Installing required dependencies + +There are a number of dependencies required to be able to run the controller and its test suite locally: + +- [Install Go](https://golang.org/doc/install) +- [Install Kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) +- [Install Docker](https://docs.docker.com/engine/install/) +- (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) + +In addition to the above, the following dependencies are also used by some of the `make` targets: + +- `controller-gen` (v0.19.0) +- `gen-crd-api-reference-docs` (v0.3.0) +- `setup-envtest` (latest) + +If any of the above dependencies are not present on your system, the first invocation of a `make` target that requires them will install them. + +## How to run the test suite + +Prerequisites: +* Go >= 1.25 + +You can run the test suite by simply doing + +```sh +make test +``` + +### Additional test configuration + +By setting the `GO_TEST_ARGS` environment variable you can pass additional flags to [`go test`](https://pkg.go.dev/cmd/go#hdr-Test_packages): + +```sh +make test GO_TEST_ARGS="-v -run=TestReadIgnoreFile/with_domain" +``` + +## How to run the controller locally + +Install the controller's CRDs on your test cluster: + +```sh +make install +``` + +Run the controller locally: + +```sh +make run +``` + +## How to install the controller + +### Building the container image + +Set the name of the container image to be created from the source code. 
This will be used +when building, pushing and referring to the image on YAML files: + +```sh +export IMG=registry-path/source-controller +export TAG=latest # optional +``` + +Build the container image, tagging it as `$(IMG):$(TAG)`: + +```sh +make docker-build +``` + +Push the image into the repository: + +```sh +make docker-push +``` + +Alternatively, the three steps above can be done in a single line: + +```sh +IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \ + make docker-build +``` +For an extensive list of `BUILD_ARGS`, refer to the docker [buildx build options] documentation. + +**Note:** `make docker-build` will build images for all supported architecture by default. +Limit this to a specific architecture for faster builds: + +```sh +IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push BUILD_PLATFORMS=amd64 \ + make docker-build +``` + +[buildx build options]: https://docs.docker.com/engine/reference/commandline/buildx_build/#options + +If you get the following error when building the docker container: +``` +Multiple platforms feature is currently not supported for docker driver. +Please switch to a different driver (eg. "docker buildx create --use") +``` + +you may need to create and switch to a new builder that supports multiple platforms: + +```sh +docker buildx create --use +``` + +### Deploying into a cluster + +Deploy `source-controller` into the cluster that is configured in the local kubeconfig file (i.e. `~/.kube/config`): + +```sh +make deploy +``` + +### Debugging controller with VSCode + +Create a `.vscode/launch.json` file: +```json +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Launch Package", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/main.go", + "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"] + } + ] +} +``` + +Start debugging by either clicking `Run` > `Start Debugging` or using +the relevant shortcut. 
diff --git a/Dockerfile b/Dockerfile index fbbe12ae6..0f7c6f849 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,51 +1,47 @@ +ARG GO_VERSION=1.25 +ARG XX_VERSION=1.6.1 + +FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx + # Docker buildkit multi-arch build requires golang alpine -FROM golang:1.15-alpine as builder +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder -RUN apk add gcc pkgconfig libc-dev -RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community libgit2-dev~=1.1 -# TODO: replace with non-edge musl 1.2.x when made available -# musl 1.2.x is a strict requirement of libgit2 due to time_t changes -# ref: https://musl.libc.org/time64.html -RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/main musl~=1.2 +# Copy the build utilities. +COPY --from=xx / / +ARG TARGETPLATFORM + +# Configure workspace WORKDIR /workspace -# copy api submodule +# Copy api submodule COPY api/ api/ -# copy modules manifests +# Copy modules manifests COPY go.mod go.mod COPY go.sum go.sum -# cache modules +# Cache modules RUN go mod download -# copy source code +# Copy source code COPY main.go main.go -COPY controllers/ controllers/ -COPY pkg/ pkg/ COPY internal/ internal/ -# build without specifing the arch -RUN CGO_ENABLED=1 go build -o source-controller main.go +ARG TARGETPLATFORM +ARG TARGETARCH -FROM alpine:3.12 +# build without specifing the arch +ENV CGO_ENABLED=0 +RUN xx-go build -trimpath -a -o source-controller main.go -# link repo to the GitHub Container Registry image -LABEL org.opencontainers.image.source="https://github.com/fluxcd/source-controller" +FROM alpine:3.22 -RUN apk add --no-cache ca-certificates tini -RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community libgit2~=1.1 -RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/main musl~=1.2 +ARG TARGETPLATFORM +RUN apk --no-cache add ca-certificates \ + && 
update-ca-certificates COPY --from=builder /workspace/source-controller /usr/local/bin/ -# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries. -# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460 -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf - -RUN addgroup -S controller && adduser -S -g controller controller - -USER controller - -ENTRYPOINT [ "/sbin/tini", "--", "source-controller" ] +USER 65534:65534 +ENTRYPOINT [ "source-controller" ] diff --git a/MAINTAINERS b/MAINTAINERS index 31595221d..3a1bb4156 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2,7 +2,9 @@ The maintainers are generally available in Slack at https://cloud-native.slack.com in #flux (https://cloud-native.slack.com/messages/CLAJ40HV3) (obtain an invitation at https://slack.cncf.io/). -This project shares maintainers from the main Flux v2 git repository, -as listed in +In additional to those listed below, this project shares maintainers +from the main Flux v2 git repository, as listed in https://github.com/fluxcd/flux2/blob/main/MAINTAINERS + +Dipti Pai, Microsoft (github: @dipti-pai, slack: Dipti Pai) diff --git a/Makefile b/Makefile index bb622903c..28226af5d 100644 --- a/Makefile +++ b/Makefile @@ -1,114 +1,220 @@ # Image URL to use all building/pushing image targets -IMG ?= fluxcd/source-controller:latest +IMG ?= fluxcd/source-controller +TAG ?= latest + +# Allows for defining additional Go test args, e.g. '-tags integration'. +GO_TEST_ARGS ?= -race + +# Allows for filtering tests based on the specified prefix +GO_TEST_PREFIX ?= + +# Defines whether cosign verification should be skipped. +SKIP_COSIGN_VERIFICATION ?= false + +# Allows for defining additional Docker buildx arguments, +# e.g. '--push'. +BUILD_ARGS ?= +# Architectures to build images for +BUILD_PLATFORMS ?= linux/amd64,linux/arm64,linux/arm/v7 + +# Go additional tag arguments, e.g. 
'integration', +# this is append to the tag arguments required for static builds +GO_TAGS ?= + # Produce CRDs that work back to Kubernetes 1.16 CRD_OPTIONS ?= crd:crdVersions=v1 -# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +# Repository root based on Git metadata +REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel) +BUILD_DIR := $(REPOSITORY_ROOT)/build + +# Other dependency versions +ENVTEST_BIN_VERSION ?= 1.24.0 + +# FUZZ_TIME defines the max amount of time, in Go Duration, +# each fuzzer should run for. +FUZZ_TIME ?= 1m + +GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))' + +# API (doc) generation utilities +CONTROLLER_GEN_VERSION ?= v0.19.0 +GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113 + +# If gobin not set, create one on ./build and add to path. ifeq (,$(shell go env GOBIN)) -GOBIN=$(shell go env GOPATH)/bin +export GOBIN=$(BUILD_DIR)/gobin else -GOBIN=$(shell go env GOBIN) +export GOBIN=$(shell go env GOBIN) endif +export PATH:=${GOBIN}:${PATH} -all: manager +# Architecture to use envtest with +ifeq ($(shell uname -m),x86_64) +ENVTEST_ARCH ?= amd64 +else +ENVTEST_ARCH ?= arm64 +endif + +ifeq ($(shell uname -s),Darwin) +# Envtest only supports darwin-amd64 +ENVTEST_ARCH=amd64 +endif -# Run tests -test: generate fmt vet manifests api-docs - go test ./... -coverprofile cover.out - cd api; go test ./... 
-coverprofile cover.out +all: manager # Build manager binary manager: generate fmt vet - go build -o bin/manager main.go - -# Run against the configured Kubernetes cluster in ~/.kube/config -run: generate fmt vet manifests - go run ./main.go - -# Install CRDs into a cluster -install: manifests + go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go + +KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)" +test: install-envtest test-api ## Run all tests + HTTPS_PROXY="" HTTP_PROXY="" \ + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ + GIT_CONFIG_GLOBAL=/dev/null \ + GIT_CONFIG_NOSYSTEM=true \ + go test $(GO_STATIC_FLAGS) \ + ./... \ + $(GO_TEST_ARGS) \ + -coverprofile cover.out + +test-ctrl: install-envtest test-api ## Run controller tests + HTTPS_PROXY="" HTTP_PROXY="" \ + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ + GIT_CONFIG_GLOBAL=/dev/null \ + go test $(GO_STATIC_FLAGS) \ + -run "^$(GO_TEST_PREFIX).*" \ + -v ./internal/controller \ + -coverprofile cover.out + +test-api: ## Run api tests + cd api; go test $(GO_TEST_ARGS) ./... 
-coverprofile cover.out + +run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config + @mkdir -p $(PWD)/bin/data + go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data + +install: manifests ## Install CRDs into a cluster kustomize build config/crd | kubectl apply -f - -# Uninstall CRDs from a cluster -uninstall: manifests +uninstall: manifests ## Uninstall CRDs from a cluster kustomize build config/crd | kubectl delete -f - -# Deploy controller in the configured Kubernetes cluster in ~/.kube/config -deploy: manifests - cd config/manager && kustomize edit set image fluxcd/source-controller=${IMG} +deploy: manifests ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config + cd config/manager && kustomize edit set image fluxcd/source-controller=$(IMG):$(TAG) kustomize build config/default | kubectl apply -f - -# Deploy controller dev image in the configured Kubernetes cluster in ~/.kube/config -dev-deploy: +dev-deploy: ## Deploy controller dev image in the configured Kubernetes cluster in ~/.kube/config mkdir -p config/dev && cp config/default/* config/dev - cd config/dev && kustomize edit set image fluxcd/source-controller=${IMG} + cd config/dev && kustomize edit set image fluxcd/source-controller=$(IMG):$(TAG) kustomize build config/dev | kubectl apply -f - rm -rf config/dev -# Generate manifests e.g. CRD, RBAC etc. -manifests: controller-gen +manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="config/crd/bases" cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." 
output:crd:artifacts:config="../config/crd/bases" -# Generate API reference documentation -api-docs: gen-crd-api-reference-docs - $(API_REF_GEN) -api-dir=./api/v1beta1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md +api-docs: gen-crd-api-reference-docs ## Generate API reference documentation + $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md -# Run go mod tidy -tidy: - go mod tidy - cd api; go mod tidy +tidy: ## Run go mod tidy + cd api; rm -f go.sum; go mod tidy -compat=1.25 + rm -f go.sum; go mod tidy -compat=1.25 -# Run go fmt against code -fmt: +fmt: ## Run go fmt against code go fmt ./... cd api; go fmt ./... -# Run go vet against code -vet: +vet: ## Run go vet against code go vet ./... cd api; go vet ./... -# Generate code -generate: controller-gen +generate: controller-gen ## Generate API code cd api; $(CONTROLLER_GEN) object:headerFile="../hack/boilerplate.go.txt" paths="./..." -# Build the docker image -docker-build: - docker build . -t ${IMG} +docker-build: ## Build the Docker image + docker buildx build \ + --platform=$(BUILD_PLATFORMS) \ + -t $(IMG):$(TAG) \ + $(BUILD_ARGS) . -# Push the docker image -docker-push: - docker push ${IMG} +docker-push: ## Push Docker image + docker push $(IMG):$(TAG) # Find or download controller-gen -controller-gen: -ifeq (, $(shell which controller-gen)) - @{ \ - set -e ;\ - CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\ - cd $$CONTROLLER_GEN_TMP_DIR ;\ - go mod init tmp ;\ - go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.3.0 ;\ - rm -rf $$CONTROLLER_GEN_TMP_DIR ;\ - } -CONTROLLER_GEN=$(GOBIN)/controller-gen -else -CONTROLLER_GEN=$(shell which controller-gen) -endif +CONTROLLER_GEN = $(GOBIN)/controller-gen +.PHONY: controller-gen +controller-gen: ## Download controller-gen locally if necessary. 
+ $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION)) # Find or download gen-crd-api-reference-docs -gen-crd-api-reference-docs: -ifeq (, $(shell which gen-crd-api-reference-docs)) - @{ \ - set -e ;\ - API_REF_GEN_TMP_DIR=$$(mktemp -d) ;\ - cd $$API_REF_GEN_TMP_DIR ;\ - go mod init tmp ;\ - go get github.com/ahmetb/gen-crd-api-reference-docs@v0.2.0 ;\ - rm -rf $$API_REF_GEN_TMP_DIR ;\ - } -API_REF_GEN=$(GOBIN)/gen-crd-api-reference-docs -else -API_REF_GEN=$(shell which gen-crd-api-reference-docs) -endif +GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs +.PHONY: gen-crd-api-reference-docs +gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary + $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION)) + +ENVTEST = $(GOBIN)/setup-envtest +.PHONY: envtest +setup-envtest: ## Download setup-envtest locally if necessary. + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) + +ENVTEST_ASSETS_DIR=$(BUILD_DIR)/testbin +ENVTEST_KUBERNETES_VERSION?=latest +install-envtest: setup-envtest ## Download envtest binaries locally. + mkdir -p ${ENVTEST_ASSETS_DIR} + $(ENVTEST) use $(ENVTEST_KUBERNETES_VERSION) --arch=$(ENVTEST_ARCH) --bin-dir=$(ENVTEST_ASSETS_DIR) +# setup-envtest sets anything below k8s to 0555 + chmod -R u+w $(BUILD_DIR)/testbin + +.PHONY: help +help: ## Display this help menu + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +e2e: + ./hack/ci/e2e.sh + +verify: fmt vet manifests api-docs tidy + @if [ ! 
"$$(git status --porcelain --untracked-files=no)" = "" ]; then \ + echo "working directory is dirty:"; \ + git --no-pager diff; \ + exit 1; \ + fi + +# go-install-tool will 'go install' any package $2 and install it to $1. +define go-install-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\ +rm -rf $$TMP_DIR ;\ +} +endef + +# Build fuzzers used by oss-fuzz. +fuzz-build: + rm -rf $(shell pwd)/build/fuzz/ + mkdir -p $(shell pwd)/build/fuzz/out/ + + docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder + docker run --rm \ + -e FUZZING_LANGUAGE=go -e SANITIZER=address \ + -e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \ + -v "$(shell pwd)/build/fuzz/out":/out \ + local-fuzzing:latest + +# Run each fuzzer once to ensure they will work when executed by oss-fuzz. +fuzz-smoketest: fuzz-build + docker run --rm \ + -v "$(shell pwd)/build/fuzz/out":/out \ + -v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \ + local-fuzzing:latest \ + bash -c "/runner.sh" + +# Run fuzz tests for the duration set in FUZZ_TIME. 
+fuzz-native: + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \ + FUZZ_TIME=$(FUZZ_TIME) \ + ./tests/fuzz/native_go_run.sh diff --git a/PROJECT b/PROJECT index a807390b9..9d89d81be 100644 --- a/PROJECT +++ b/PROJECT @@ -1,6 +1,27 @@ domain: toolkit.fluxcd.io repo: github.com/fluxcd/source-controller resources: +- group: source + kind: GitRepository + version: v1 +- group: source + kind: GitRepository + version: v1beta2 +- group: source + kind: HelmRepository + version: v1 +- group: source + kind: HelmRepository + version: v1beta2 +- group: source + kind: HelmChart + version: v1 +- group: source + kind: HelmChart + version: v1beta2 +- group: source + kind: Bucket + version: v1beta2 - group: source kind: GitRepository version: v1beta1 @@ -13,4 +34,16 @@ resources: - group: source kind: Bucket version: v1beta1 +- group: source + kind: OCIRepository + version: v1beta2 +- group: source + kind: Bucket + version: v1 +- group: source + kind: OCIRepository + version: v1 +- group: source + kind: ExternalArtifact + version: v1 version: "2" diff --git a/README.md b/README.md index efb28014e..6f07b2e00 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,53 @@ # Source controller -[![e2e](https://github.com/fluxcd/source-controller/workflows/e2e/badge.svg)](https://github.com/fluxcd/kustomize-controller/actions) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4786/badge)](https://bestpractices.coreinfrastructure.org/projects/4786) +[![e2e](https://github.com/fluxcd/source-controller/workflows/e2e/badge.svg)](https://github.com/fluxcd/source-controller/actions) [![report](https://goreportcard.com/badge/github.com/fluxcd/source-controller)](https://goreportcard.com/report/github.com/fluxcd/source-controller) 
[![license](https://img.shields.io/github/license/fluxcd/source-controller.svg)](https://github.com/fluxcd/source-controller/blob/main/LICENSE) [![release](https://img.shields.io/github/release/fluxcd/source-controller/all.svg)](https://github.com/fluxcd/source-controller/releases) - + The source-controller is a Kubernetes operator, specialised in artifacts acquisition -from external sources such as Git, Helm repositories and S3 buckets. +from external sources such as Git, OCI, Helm repositories and S3-compatible buckets. The source-controller implements the -[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/master/docs/spec/v1beta1) API -and is a core component of the [GitOps toolkit](https://toolkit.fluxcd.io). +[source.toolkit.fluxcd.io](docs/spec/README.md) API +and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/). ![overview](docs/diagrams/source-controller-overview.png) -Features: +## APIs + +| Kind | API Version | +|----------------------------------------------------|-------------------------------| +| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | +| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | + +## Features -* authenticates to sources (SSH, user/password, API token) -* validates source authenticity (PGP) +* authenticates to sources (SSH, user/password, API token, Workload Identity) +* validates source authenticity (PGP, Cosign, Notation) * detects source changes based on update policies (semver) * fetches resources on-demand and on-a-schedule * packages the fetched resources into a well-known format (tar.gz, yaml) * makes the artifacts addressable by 
their source identifier (sha, version, ts) * makes the artifacts available in-cluster to interested 3rd parties * notifies interested 3rd parties of source changes and availability (status conditions, events, hooks) -* reacts to Git push and Helm chart upload events (via [notification-controller](https://github.com/fluxcd/notification-controller)) +* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller)) + +## Guides + +* [Get started with Flux](https://fluxcd.io/flux/get-started/) +* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/) +* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/) +* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/) + +## Roadmap + +The roadmap for the Flux family of projects can be found at <https://fluxcd.io/roadmap/>. + +## Contributing + +This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests. +To start contributing please see the [development guide](DEVELOPMENT.md). 
diff --git a/api/go.mod b/api/go.mod index aaa08b45c..3d821f349 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,10 +1,34 @@ module github.com/fluxcd/source-controller/api -go 1.15 +go 1.25.0 require ( - github.com/fluxcd/pkg/apis/meta v0.7.0 - k8s.io/api v0.20.2 // indirect - k8s.io/apimachinery v0.20.2 - sigs.k8s.io/controller-runtime v0.8.0 + github.com/fluxcd/pkg/apis/acl v0.9.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + k8s.io/apimachinery v0.34.0 + sigs.k8s.io/controller-runtime v0.22.1 +) + +// Fix CVE-2022-28948 +replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 + +require ( + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/text v0.28.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index aa7e666d4..1aa815d66 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,663 +1,118 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fluxcd/pkg/apis/meta v0.7.0 h1:5e8gm4OLqjuKWdrOIY5DEEsjcwzyJFK8rCDesJ+V8IY= -github.com/fluxcd/pkg/apis/meta v0.7.0/go.mod h1:yHuY8kyGHYz22I0jQzqMMGCcHViuzC/WPdo9Gisk8Po= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod 
h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= 
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox 
v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod 
h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= 
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 
h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= 
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api 
v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod 
h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= -k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= 
-k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.8.0 h1:s0dYdo7lQgJiAf+alP82PRwbz+oAqL3oSyMQ18XRDOc= -sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 
h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go new file mode 100644 index 000000000..bbedcefb3 --- /dev/null +++ b/api/v1/bucket_types.go @@ -0,0 +1,281 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = "generic" + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service + // and workload identity authentication. + BucketProviderAmazon string = "aws" + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = "gcp" + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = "azure" +) + +// BucketSpec specifies the required configuration to produce an Artifact for +// an object storage bucket. 
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.serviceAccountName)", message="ServiceAccountName is not supported for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.secretRef) || !has(self.serviceAccountName)", message="cannot set both .spec.secretRef and .spec.serviceAccountName" +type BucketSpec struct { + // Provider of the object storage bucket. + // Defaults to 'generic', which expects an S3 (API) compatible object + // storage. + // +kubebuilder:validation:Enum=generic;aws;gcp;azure + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // BucketName is the name of the object storage bucket. + // +required + BucketName string `json:"bucketName"` + + // Endpoint is the object storage address the BucketName is located at. 
+ // +required + Endpoint string `json:"endpoint"` + + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP Endpoint. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // Region of the Endpoint where the BucketName is located in. + // +optional + Region string `json:"region,omitempty"` + + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + + // SecretRef specifies the Secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the bucket. This field is only supported for the 'gcp' and 'aws' providers. + // For more information about workload identity: + // https://fluxcd.io/flux/components/source/buckets/#workload-identity + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. 
+ // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for fetch operations, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // Bucket. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. 
+ // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + +// BucketStatus records the observed state of a Bucket. +type BucketStatus struct { + // ObservedGeneration is the last observed generation of the Bucket object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful Bucket reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. 
+ // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason signals that the Bucket listing and fetch + // operations succeeded. + BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason signals that the Bucket listing or fetch + // operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in *Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in *Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// Bucket is the Schema for the buckets API. 
+type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// BucketList contains a list of Bucket objects. +// +kubebuilder:object:root=true +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1/condition_types.go b/api/v1/condition_types.go new file mode 100644 index 000000000..9641db99c --- /dev/null +++ b/api/v1/condition_types.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const SourceFinalizer = "finalizers.fluxcd.io" + +const ( + // ArtifactInStorageCondition indicates the availability of the Artifact in + // the storage. + // If True, the Artifact is stored successfully. + // This Condition is only present on the resource if the Artifact is + // successfully stored. + ArtifactInStorageCondition string = "ArtifactInStorage" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source + // is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. 
+ ArtifactOutdatedCondition string = "ArtifactOutdated" + + // SourceVerifiedCondition indicates the integrity verification of the + // Source. + // If True, the integrity check succeeded. If False, it failed. + // This Condition is only present on the resource if the integrity check + // is enabled. + SourceVerifiedCondition string = "SourceVerified" + + // FetchFailedCondition indicates a transient or persistent fetch failure + // of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, + // and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" + + // BuildFailedCondition indicates a transient or persistent build failure + // of a Source's Artifact. + // If True, the Source can be in an ArtifactOutdatedCondition. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + BuildFailedCondition string = "BuildFailed" + + // StorageOperationFailedCondition indicates a transient or persistent + // failure related to storage. If True, the reconciliation failed while + // performing some filesystem operation. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + StorageOperationFailedCondition string = "StorageOperationFailed" +) + +// Reasons are provided as utility, and not part of the declarative API. +const ( + // URLInvalidReason signals that a given Source has an invalid URL. + URLInvalidReason string = "URLInvalid" + + // AuthenticationFailedReason signals that a Secret does not have the + // required fields, or the provided credentials do not match. + AuthenticationFailedReason string = "AuthenticationFailed" + + // VerificationError signals that the Source's verification + // check failed. 
+	VerificationError string = "VerificationError"
+
+	// DirCreationFailedReason signals a failure caused by a directory creation
+	// operation.
+	DirCreationFailedReason string = "DirectoryCreationFailed"
+
+	// StatOperationFailedReason signals a failure caused by a stat operation on
+	// a path.
+	StatOperationFailedReason string = "StatOperationFailed"
+
+	// ReadOperationFailedReason signals a failure caused by a read operation.
+	ReadOperationFailedReason string = "ReadOperationFailed"
+
+	// AcquireLockFailedReason signals a failure in acquiring lock.
+	AcquireLockFailedReason string = "AcquireLockFailed"
+
+	// InvalidPathReason signals a failure caused by an invalid path.
+	InvalidPathReason string = "InvalidPath"
+
+	// ArchiveOperationFailedReason signals a failure in archive operation.
+	ArchiveOperationFailedReason string = "ArchiveOperationFailed"
+
+	// SymlinkUpdateFailedReason signals a failure in updating a symlink.
+	SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
+
+	// ArtifactUpToDateReason signals that an existing Artifact is up-to-date
+	// with the Source.
+	ArtifactUpToDateReason string = "ArtifactUpToDate"
+
+	// CacheOperationFailedReason signals a failure in cache operation.
+	CacheOperationFailedReason string = "CacheOperationFailed"
+
+	// PatchOperationFailedReason signals a failure in patching a kubernetes API
+	// object.
+	PatchOperationFailedReason string = "PatchOperationFailed"
+
+	// InvalidSTSConfigurationReason signals that the STS configuration is invalid.
+	InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"
+
+	// InvalidProviderConfigurationReason signals that the provider
+	// configuration is invalid.
+ InvalidProviderConfigurationReason string = "InvalidProviderConfiguration" +) diff --git a/internal/helm/utils.go b/api/v1/doc.go similarity index 66% rename from internal/helm/utils.go rename to api/v1/doc.go index ff2221c61..a06b2174b 100644 --- a/internal/helm/utils.go +++ b/api/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Flux authors +Copyright 2023 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package helm - -import "strings" - -// NormalizeChartRepositoryURL ensures repository urls are normalized -func NormalizeChartRepositoryURL(url string) string { - if url != "" { - return strings.TrimRight(url, "/") + "/" - } - return url -} +// Package v1 contains API Schema definitions for the source v1 API group +// +kubebuilder:object:generate=true +// +groupName=source.toolkit.fluxcd.io +package v1 diff --git a/api/v1/externalartifact_types.go b/api/v1/externalartifact_types.go new file mode 100644 index 000000000..e338b733b --- /dev/null +++ b/api/v1/externalartifact_types.go @@ -0,0 +1,97 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// ExternalArtifactKind is the string representation of the ExternalArtifact. +const ExternalArtifactKind = "ExternalArtifact" + +// ExternalArtifactSpec defines the desired state of ExternalArtifact +type ExternalArtifactSpec struct { + // SourceRef points to the Kubernetes custom resource for + // which the artifact is generated. + // +optional + SourceRef *meta.NamespacedObjectKindReference `json:"sourceRef,omitempty"` +} + +// ExternalArtifactStatus defines the observed state of ExternalArtifact +type ExternalArtifactStatus struct { + // Artifact represents the output of an ExternalArtifact reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // Conditions holds the conditions for the ExternalArtifact. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// GetConditions returns the status conditions of the object. +func (in *ExternalArtifact) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *ExternalArtifact) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetArtifact returns the latest Artifact from the ExternalArtifact if +// present in the status sub-resource. +func (in *ExternalArtifact) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetRequeueAfter returns the duration after which the ExternalArtifact +// must be reconciled again. 
+func (in *ExternalArtifact) GetRequeueAfter() time.Duration { + return time.Minute +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceRef.name",description="" + +// ExternalArtifact is the Schema for the external artifacts API +type ExternalArtifact struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalArtifactSpec `json:"spec,omitempty"` + Status ExternalArtifactStatus `json:"status,omitempty"` +} + +// ExternalArtifactList contains a list of ExternalArtifact +// +kubebuilder:object:root=true +type ExternalArtifactList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalArtifact `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalArtifact{}, &ExternalArtifactList{}) +} diff --git a/api/v1/gitrepository_types.go b/api/v1/gitrepository_types.go new file mode 100644 index 000000000..f104fd0f1 --- /dev/null +++ b/api/v1/gitrepository_types.go @@ -0,0 +1,384 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // GitRepositoryKind is the string representation of a GitRepository. + GitRepositoryKind = "GitRepository" + + // GitProviderGeneric provides support for authentication using + // credentials specified in secretRef. + GitProviderGeneric string = "generic" + + // GitProviderAzure provides support for authentication to azure + // repositories using Managed Identity. + GitProviderAzure string = "azure" + + // GitProviderGitHub provides support for authentication to git + // repositories using GitHub App authentication + GitProviderGitHub string = "github" +) + +const ( + // IncludeUnavailableCondition indicates one of the includes is not + // available. For example, because it does not exist, or does not have an + // Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" +) + +// GitVerificationMode specifies the verification mode for a Git repository. +type GitVerificationMode string + +// Valid checks the validity of the Git verification mode. +func (m GitVerificationMode) Valid() bool { + switch m { + case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD: + return true + default: + return false + } +} + +const ( + // ModeGitHEAD implies that the HEAD of the Git repository (after it has been + // checked out to the required commit) should be verified. + ModeGitHEAD GitVerificationMode = "HEAD" + // ModeGitTag implies that the tag object specified in the checkout configuration + // should be verified. + ModeGitTag GitVerificationMode = "Tag" + // ModeGitTagAndHEAD implies that both the tag object and the commit it points + // to should be verified. 
+ ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD" +) + +// GitRepositorySpec specifies the required configuration to produce an +// Artifact for a Git repository. +// +kubebuilder:validation:XValidation:rule="!has(self.serviceAccountName) || (has(self.provider) && self.provider == 'azure')",message="serviceAccountName can only be set when provider is 'azure'" +type GitRepositorySpec struct { + // URL specifies the Git repository URL, it can be an HTTP/S or SSH address. + // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials for + // the GitRepository. + // For HTTPS repositories the Secret must contain 'username' and 'password' + // fields for basic auth or 'bearerToken' field for token auth. + // For SSH repositories the Secret must contain 'identity' + // and 'known_hosts' fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Provider used for authentication, can be 'azure', 'github', 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;azure;github + // +optional + Provider string `json:"provider,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to + // authenticate to the GitRepository. This field is only supported for 'azure' provider. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // Interval at which the GitRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for Git operations like cloning, defaults to 60s. 
+ // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Reference specifies the Git reference to resolve and monitor for + // changes, defaults to the 'master' branch. + // +optional + Reference *GitRepositoryRef `json:"ref,omitempty"` + + // Verification specifies the configuration to verify the Git commit + // signature(s). + // +optional + Verification *GitRepositoryVerification `json:"verify,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Git server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // GitRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // RecurseSubmodules enables the initialization of all submodules within + // the GitRepository as cloned from the URL, using their default settings. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Include specifies a list of GitRepository resources which Artifacts + // should be included in the Artifact produced for this GitRepository. + // +optional + Include []GitRepositoryInclude `json:"include,omitempty"` + + // SparseCheckout specifies a list of directories to checkout when cloning + // the repository. If specified, only these directories are included in the + // Artifact produced for this GitRepository. 
+ // +optional + SparseCheckout []string `json:"sparseCheckout,omitempty"` +} + +// GitRepositoryInclude specifies a local reference to a GitRepository which +// Artifact (sub-)contents must be included, and where they should be placed. +type GitRepositoryInclude struct { + // GitRepositoryRef specifies the GitRepository which Artifact contents + // must be included. + // +required + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // FromPath specifies the path to copy contents from, defaults to the root + // of the Artifact. + // +optional + FromPath string `json:"fromPath,omitempty"` + + // ToPath specifies the path to copy contents to, defaults to the name of + // the GitRepositoryRef. + // +optional + ToPath string `json:"toPath,omitempty"` +} + +// GetFromPath returns the specified FromPath. +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +// GetToPath returns the specified ToPath, falling back to the name of the +// GitRepositoryRef. +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryRef specifies the Git reference to resolve and checkout. +type GitRepositoryRef struct { + // Branch to check out, defaults to 'master' if no other field is defined. + // +optional + Branch string `json:"branch,omitempty"` + + // Tag to check out, takes precedence over Branch. + // +optional + Tag string `json:"tag,omitempty"` + + // SemVer tag expression to check out, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // Name of the reference to check out; takes precedence over Branch, Tag and SemVer. 
+ // + // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" + // +optional + Name string `json:"name,omitempty"` + + // Commit SHA to check out, takes precedence over all reference fields. + // + // This can be combined with Branch to shallow clone the branch, in which + // the commit is expected to exist. + // +optional + Commit string `json:"commit,omitempty"` +} + +// GitRepositoryVerification specifies the Git commit signature verification +// strategy. +type GitRepositoryVerification struct { + // Mode specifies which Git object(s) should be verified. + // + // The variants "head" and "HEAD" both imply the same thing, i.e. verify + // the commit that the HEAD of the Git repository points to. The variant + // "head" solely exists to ensure backwards compatibility. + // +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD + // +optional + // +kubebuilder:default:=HEAD + Mode GitVerificationMode `json:"mode,omitempty"` + + // SecretRef specifies the Secret containing the public keys of trusted Git + // authors. + // +required + SecretRef meta.LocalObjectReference `json:"secretRef"` +} + +// GitRepositoryStatus records the observed state of a Git repository. +type GitRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the GitRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the GitRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Artifact represents the last successful GitRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // IncludedArtifacts contains a list of the last successfully included + // Artifacts as instructed by GitRepositorySpec.Include. 
+ // +optional + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedRecurseSubmodules is the observed resource submodules + // configuration used to produce the current Artifact. + // +optional + ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"` + + // ObservedInclude is the observed list of GitRepository resources used to + // produce the current Artifact. + // +optional + ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + + // ObservedSparseCheckout is the observed list of directories used to + // produce the current Artifact. + // +optional + ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"` + + // SourceVerificationMode is the last used verification mode indicating + // which Git object(s) have been verified. + // +optional + SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // GitOperationSucceedReason signals that a Git operation (e.g. clone, + // checkout, etc.) succeeded. + GitOperationSucceedReason string = "GitOperationSucceeded" + + // GitOperationFailedReason signals that a Git operation (e.g. clone, + // checkout, etc.) failed. + GitOperationFailedReason string = "GitOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the GitRepository must be +// reconciled again. 
+func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the GitRepository if present in +// the status sub-resource. +func (in *GitRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetProvider returns the Git authentication provider. +func (v *GitRepository) GetProvider() string { + if v.Spec.Provider == "" { + return GitProviderGeneric + } + return v.Spec.Provider +} + +// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default. +func (v *GitRepositoryVerification) GetMode() GitVerificationMode { + if v.Mode.Valid() { + return v.Mode + } + return ModeGitHEAD +} + +// VerifyHEAD returns if the configured mode instructs verification of the +// Git HEAD. +func (v *GitRepositoryVerification) VerifyHEAD() bool { + return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD +} + +// VerifyTag returns if the configured mode instructs verification of the +// Git tag. +func (v *GitRepositoryVerification) VerifyTag() bool { + return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=gitrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// GitRepository is the Schema for the gitrepositories API. 
+type GitRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status GitRepositoryStatus `json:"status,omitempty"` +} + +// GitRepositoryList contains a list of GitRepository objects. +// +kubebuilder:object:root=true +type GitRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GitRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{}) +} diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go new file mode 100644 index 000000000..b539a7947 --- /dev/null +++ b/api/v1/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/helmchart_types.go b/api/v1/helmchart_types.go new file mode 100644 index 000000000..23cb24146 --- /dev/null +++ b/api/v1/helmchart_types.go @@ -0,0 +1,227 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// HelmChartKind is the string representation of a HelmChart. +const HelmChartKind = "HelmChart" + +// HelmChartSpec specifies the desired state of a Helm chart. +type HelmChartSpec struct { + // Chart is the name or path the Helm chart is available at in the + // SourceRef. + // +required + Chart string `json:"chart"` + + // Version is the chart version semver expression, ignored for charts from + // GitRepository and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // SourceRef is the reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // Interval at which the HelmChart SourceRef is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // ReconcileStrategy determines what enables the creation of a new artifact. + // Valid values are ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // ValuesFiles is an alternative list of values files to use as the chart + // values (values.yaml is not included by default), expected to be a + // relative path in the SourceRef. + // Values files are merged in the order of this list with the last file + // overriding the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // This field is only supported when using HelmRepository source with spec.type 'oci'. + // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. 
+ ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. + ReconcileStrategyRevision string = "Revision" +) + +// LocalHelmChartSourceReference contains enough information to let you locate +// the typed referenced object at namespace level. +type LocalHelmChartSourceReference struct { + // APIVersion of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + // 'Bucket'). + // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// HelmChartStatus records the observed state of the HelmChart. +type HelmChartStatus struct { + // ObservedGeneration is the last observed generation of the HelmChart + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // ObservedSourceArtifactRevision is the last observed Artifact.Revision + // of the HelmChartSpec.SourceRef. + // +optional + ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"` + + // ObservedChartName is the last observed chart name as specified by the + // resolved chart reference. + // +optional + ObservedChartName string `json:"observedChartName,omitempty"` + + // ObservedValuesFiles are the observed value files of the last successful + // reconciliation. + // It matches the chart in the last successfully reconciled artifact. + // +optional + ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"` + + // Conditions holds the conditions for the HelmChart. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. 
+ // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullSucceededReason signals that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageSucceededReason signals that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmChart) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + return in.Spec.ValuesFiles +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmChart is the Schema for the helmcharts API. +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// HelmChartList contains a list of HelmChart objects. +// +kubebuilder:object:root=true +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, &HelmChartList{}) +} diff --git a/api/v1/helmrepository_types.go b/api/v1/helmrepository_types.go new file mode 100644 index 000000000..1c19064a5 --- /dev/null +++ b/api/v1/helmrepository_types.go @@ -0,0 +1,228 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository + // objects by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" + // HelmRepositoryTypeDefault is the default HelmRepository type. + // It is used when no type is specified and corresponds to a Helm repository. + HelmRepositoryTypeDefault = "default" + // HelmRepositoryTypeOCI is the type for an OCI repository. + HelmRepositoryTypeOCI = "oci" +) + +// HelmRepositorySpec specifies the required configuration to produce an +// Artifact for a Helm repository index YAML. +type HelmRepositorySpec struct { + // URL of the Helm repository, a valid URL contains at least a protocol and + // host. + // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials + // for the HelmRepository. + // For HTTP/S basic auth the secret must contain 'username' and 'password' + // fields. + // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + // keys is deprecated. Please use `.spec.certSecretRef` instead. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // It takes precedence over the values specified in the Secret referred + // to by `.spec.secretRef`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed + // on to a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the + // index differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result + // in credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // Interval at which the HelmRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval metav1.Duration `json:"interval,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // This field is only taken into account if the .spec.type field is set to 'oci'. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Timeout is used for the index fetch operation for an HTTPS helm repository, + // and for remote OCI Repository operations like pulling for an OCI helm + // chart by the associated HelmChart. + // Its default value is 60s. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // HelmRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Type of the HelmRepository. + // When this field is set to "oci", the URL field value must be prefixed with "oci://". + // +kubebuilder:validation:Enum=default;oci + // +optional + Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` +} + +// HelmRepositoryStatus records the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the HelmRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. 
+ // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // HelmRepositoryStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful HelmRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason signals that the HelmRepository index fetch + // failed. + IndexationFailedReason string = "IndexationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + if in.Spec.Interval.Duration != 0 { + return in.Spec.Interval.Duration + } + return time.Minute +} + +// GetTimeout returns the timeout duration used for various operations related +// to this HelmRepository. +func (in HelmRepository) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return time.Minute +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. 
+func (in *HelmRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmRepository is the Schema for the helmrepositories API. +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository objects. +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1/ocirepository_types.go b/api/v1/ocirepository_types.go new file mode 100644 index 000000000..8c4d3f0fc --- /dev/null +++ b/api/v1/ocirepository_types.go @@ -0,0 +1,296 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // OCIRepositoryKind is the string representation of an OCIRepository. + OCIRepositoryKind = "OCIRepository" + + // OCIRepositoryPrefix is the prefix used for OCIRepository URLs. + OCIRepositoryPrefix = "oci://" + + // GenericOCIProvider provides support for authentication using static credentials + // for any OCI compatible API such as Docker Registry, GitHub Container Registry, + // Docker Hub, Quay, etc. + GenericOCIProvider string = "generic" + + // AmazonOCIProvider provides support for OCI authentication using AWS IRSA. + AmazonOCIProvider string = "aws" + + // GoogleOCIProvider provides support for OCI authentication using GCP workload identity. + GoogleOCIProvider string = "gcp" + + // AzureOCIProvider provides support for OCI authentication using a Azure Service Principal, + // Managed Identity or Shared Key. + AzureOCIProvider string = "azure" + + // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer. + OCILayerExtract = "extract" + + // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer. + OCILayerCopy = "copy" +) + +// OCIRepositorySpec defines the desired state of OCIRepository +type OCIRepositorySpec struct { + // URL is a reference to an OCI artifact repository hosted + // on a remote container registry. 
+ // +kubebuilder:validation:Pattern="^oci://.*$" + // +required + URL string `json:"url"` + + // The OCI reference to pull and monitor for changes, + // defaults to the latest tag. + // +optional + Reference *OCIRepositoryRef `json:"ref,omitempty"` + + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // SecretRef contains the secret name containing the registry login + // credentials to resolve image metadata. + // The secret must be of type kubernetes.io/dockerconfigjson. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the image pull if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote OCI Repository operations like pulling, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. 
+ // +optional + Ignore *string `json:"ignore,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// OCIRepositoryRef defines the image reference for the OCIRepository's URL +type OCIRepositoryRef struct { + // Digest is the image digest to pull, takes precedence over SemVer. + // The value should be in the format 'sha256:'. + // +optional + Digest string `json:"digest,omitempty"` + + // SemVer is the range of tags to pull selecting the latest within + // the range, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + + // Tag is the image tag to pull, defaults to latest. + // +optional + Tag string `json:"tag,omitempty"` +} + +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` + + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. + // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. + // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` +} + +// OCIRepositoryStatus defines the observed state of OCIRepository +type OCIRepositoryStatus struct { + // ObservedGeneration is the last observed generation. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the OCIRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last OCI Repository sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful OCI Repository sync. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // OCIPullFailedReason signals that a pull operation failed. + OCIPullFailedReason string = "OCIArtifactPullFailed" + + // OCILayerOperationFailedReason signals that an OCI layer operation failed. + OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in OCIRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *OCIRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the OCIRepository must be +// reconciled again. +func (in OCIRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the OCIRepository if present in +// the status sub-resource. 
+func (in *OCIRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). +func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ocirepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// OCIRepository is the Schema for the ocirepositories API +type OCIRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OCIRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status OCIRepositoryStatus `json:"status,omitempty"` +} + +// OCIRepositoryList contains a list of OCIRepository +// +kubebuilder:object:root=true +type OCIRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OCIRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{}) +} diff --git a/api/v1/ociverification_types.go b/api/v1/ociverification_types.go new file mode 100644 index 
000000000..de74be343 --- /dev/null +++ b/api/v1/ociverification_types.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/fluxcd/pkg/apis/meta" +) + +// OCIRepositoryVerification verifies the authenticity of an OCI Artifact +type OCIRepositoryVerification struct { + // Provider specifies the technology used to sign the OCI Artifact. + // +kubebuilder:validation:Enum=cosign;notation + // +kubebuilder:default:=cosign + Provider string `json:"provider"` + + // SecretRef specifies the Kubernetes Secret containing the + // trusted public keys. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // MatchOIDCIdentity specifies the identity matching criteria to use + // while verifying an OCI artifact which was signed using Cosign keyless + // signing. The artifact's identity is deemed to be verified if any of the + // specified matchers match against the identity. + // +optional + MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"` +} + +// OIDCIdentityMatch specifies options for verifying the certificate identity, +// i.e. the issuer and the subject of the certificate. +type OIDCIdentityMatch struct { + // Issuer specifies the regex pattern to match against to verify + // the OIDC issuer in the Fulcio certificate. The pattern must be a + // valid Go regular expression. 
+ // +required + Issuer string `json:"issuer"` + // Subject specifies the regex pattern to match against to verify + // the identity subject in the Fulcio certificate. The pattern must + // be a valid Go regular expression. + // +required + Subject string `json:"subject"` +} diff --git a/api/v1/source.go b/api/v1/source.go new file mode 100644 index 000000000..d879f6034 --- /dev/null +++ b/api/v1/source.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // SourceIndexKey is the key used for indexing objects based on their + // referenced Source. + SourceIndexKey string = ".metadata.source" +) + +// Source interface must be supported by all API types. +// Source is the interface that provides generic access to the Artifact and +// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io +// API group. +// +// +k8s:deepcopy-gen=false +type Source interface { + runtime.Object + // GetRequeueAfter returns the duration after which the source must be + // reconciled again. + GetRequeueAfter() time.Duration + // GetArtifact returns the latest artifact from the source if present in + // the status sub-resource. 
+ GetArtifact() *meta.Artifact +} diff --git a/api/v1/sts_types.go b/api/v1/sts_types.go new file mode 100644 index 000000000..4b1d05881 --- /dev/null +++ b/api/v1/sts_types.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" +) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..14f1ba3c2 --- /dev/null +++ b/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,998 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. 
+func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifact) DeepCopyInto(out *ExternalArtifact) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifact. +func (in *ExternalArtifact) DeepCopy() *ExternalArtifact { + if in == nil { + return nil + } + out := new(ExternalArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExternalArtifact) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactList) DeepCopyInto(out *ExternalArtifactList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalArtifact, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactList. +func (in *ExternalArtifactList) DeepCopy() *ExternalArtifactList { + if in == nil { + return nil + } + out := new(ExternalArtifactList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalArtifactList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactSpec) DeepCopyInto(out *ExternalArtifactSpec) { + *out = *in + if in.SourceRef != nil { + in, out := &in.SourceRef, &out.SourceRef + *out = new(meta.NamespacedObjectKindReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactSpec. +func (in *ExternalArtifactSpec) DeepCopy() *ExternalArtifactSpec { + if in == nil { + return nil + } + out := new(ExternalArtifactSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalArtifactStatus) DeepCopyInto(out *ExternalArtifactStatus) { + *out = *in + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactStatus. +func (in *ExternalArtifactStatus) DeepCopy() *ExternalArtifactStatus { + if in == nil { + return nil + } + out := new(ExternalArtifactStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepository) DeepCopyInto(out *GitRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository. +func (in *GitRepository) DeepCopy() *GitRepository { + if in == nil { + return nil + } + out := new(GitRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. 
+func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GitRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList. +func (in *GitRepositoryList) DeepCopy() *GitRepositoryList { + if in == nil { + return nil + } + out := new(GitRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef. +func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef { + if in == nil { + return nil + } + out := new(GitRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(GitRepositoryRef) + **out = **in + } + if in.Verification != nil { + in, out := &in.Verification, &out.Verification + *out = new(GitRepositoryVerification) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.SparseCheckout != nil { + in, out := &in.SparseCheckout, &out.SparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. +func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec { + if in == nil { + return nil + } + out := new(GitRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*meta.Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + } + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedInclude != nil { + in, out := &in.ObservedInclude, &out.ObservedInclude + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.ObservedSparseCheckout != nil { + in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceVerificationMode != nil { + in, out := &in.SourceVerificationMode, &out.SourceVerificationMode + *out = new(GitVerificationMode) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus. +func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus { + if in == nil { + return nil + } + out := new(GitRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification. +func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { + if in == nil { + return nil + } + out := new(GitRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. 
+func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. +func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository. +func (in *OCIRepository) DeepCopy() *OCIRepository { + if in == nil { + return nil + } + out := new(OCIRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList. +func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList { + if in == nil { + return nil + } + out := new(OCIRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef. 
+func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef { + if in == nil { + return nil + } + out := new(OCIRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(OCIRepositoryRef) + **out = **in + } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec. +func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec { + if in == nil { + return nil + } + out := new(OCIRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus. +func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { + if in == nil { + return nil + } + out := new(OCIRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.MatchOIDCIdentity != nil { + in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity + *out = make([]OIDCIdentityMatch, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification. +func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification { + if in == nil { + return nil + } + out := new(OCIRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch. +func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch { + if in == nil { + return nil + } + out := new(OIDCIdentityMatch) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1beta1/artifact_types.go b/api/v1beta1/artifact_types.go index c7ddffce3..3fd0d2dfe 100644 --- a/api/v1beta1/artifact_types.go +++ b/api/v1beta1/artifact_types.go @@ -39,7 +39,7 @@ type Artifact struct { // +optional Revision string `json:"revision"` - // Checksum is the SHA1 checksum of the artifact. + // Checksum is the SHA256 checksum of the artifact. // +optional Checksum string `json:"checksum"` diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 492002b82..e64321c9d 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -17,9 +17,11 @@ limitations under the License. package v1beta1 import ( - "github.com/fluxcd/pkg/apis/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" ) const ( @@ -30,7 +32,7 @@ const ( // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). - // +kubebuilder:validation:Enum=generic;aws + // +kubebuilder:validation:Enum=generic;aws;gcp // +kubebuilder:default:=generic // +optional Provider string `json:"provider,omitempty"` @@ -60,8 +62,8 @@ type BucketSpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout for download operations, defaults to 20s. - // +kubebuilder:default="20s" + // The timeout for download operations, defaults to 60s. 
+ // +kubebuilder:default="60s" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -74,11 +76,16 @@ type BucketSpec struct { // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } const ( GenericBucketProvider string = "generic" AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" ) // BucketStatus defines the observed state of a bucket @@ -119,7 +126,13 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } @@ -129,14 +142,26 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. 
func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } @@ -168,20 +193,16 @@ func (in *Bucket) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // Bucket is the Schema for the buckets API type Bucket struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec BucketSpec `json:"spec,omitempty"` + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} Status BucketStatus `json:"status,omitempty"` } diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go index 7a768a45d..f604a2624 100644 --- a/api/v1beta1/doc.go +++ b/api/v1beta1/doc.go @@ -15,6 +15,9 @@ limitations under the License. */ // Package v1beta1 contains API Schema definitions for the source v1beta1 API group +// +// Deprecated: v1beta1 is no longer supported, use v1 instead. 
+// // +kubebuilder:object:generate=true // +groupName=source.toolkit.fluxcd.io package v1beta1 diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 8bc69ff24..05cce7c60 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -17,32 +17,35 @@ limitations under the License. package v1beta1 import ( - "github.com/fluxcd/pkg/apis/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" ) const ( // GitRepositoryKind is the string representation of a GitRepository. GitRepositoryKind = "GitRepository" - // GoGitImplementation represents the go-git git implementation kind. + + // GoGitImplementation represents the go-git Git implementation kind. GoGitImplementation = "go-git" - // LibGit2Implementation represents the gi2go git implementation kind. + // LibGit2Implementation represents the git2go Git implementation kind. LibGit2Implementation = "libgit2" ) // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. - // +kubebuilder:validation:Pattern="^(http|https|ssh)://" + // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" // +required URL string `json:"url"` // The secret name containing the Git credentials. // For HTTPS repositories the secret must contain username and password // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For SSH repositories the secret must contain identity and known_hosts + // fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -50,8 +53,8 @@ type GitRepositorySpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout for remote Git operations like cloning, defaults to 20s. 
- // +kubebuilder:default="20s" + // The timeout for remote Git operations like cloning, defaults to 60s. + // +kubebuilder:default="60s" // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -80,12 +83,49 @@ type GitRepositorySpec struct { // +kubebuilder:default:=go-git // +optional GitImplementation string `json:"gitImplementation,omitempty"` + + // When enabled, after the clone is created, initializes all submodules within, + // using their default settings. + // This option is available only when using the 'go-git' GitImplementation. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Extra git repositories to map into the repository + Include []GitRepositoryInclude `json:"include,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryInclude defines a source with a from and to path. +type GitRepositoryInclude struct { + // Reference to a GitRepository to include. + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // The path to copy contents from, defaults to the root directory. + // +optional + FromPath string `json:"fromPath"` + + // The path to copy contents to, defaults to the name of the source ref. + // +optional + ToPath string `json:"toPath"` } // GitRepositoryRef defines the Git ref used for pull and checkout operations. type GitRepositoryRef struct { // The Git branch to checkout, defaults to master. 
- // +kubebuilder:default:=master // +optional Branch string `json:"branch,omitempty"` @@ -131,6 +171,10 @@ type GitRepositoryStatus struct { // +optional Artifact *Artifact `json:"artifact,omitempty"` + // IncludedArtifacts represents the included artifacts from the last successful repository sync. + // +optional + IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + meta.ReconcileRequestStatus `json:",inline"` } @@ -152,17 +196,30 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } // GitRepositoryReady sets the given Artifact and URL on the GitRepository and // sets the meta.ReadyCondition to 'True', with the given reason and message. It // returns the modified GitRepository. 
-func GitRepositoryReady(repository GitRepository, artifact Artifact, url, reason, message string) GitRepository { +func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArtifacts []*Artifact, url, reason, message string) GitRepository { repository.Status.Artifact = &artifact + repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -170,7 +227,13 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, url, reason // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -202,20 +265,17 @@ func (in *GitRepository) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// 
+kubebuilder:resource:shortName=gitrepo +// +kubebuilder:skipversion // GitRepository is the Schema for the gitrepositories API type GitRepository struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec GitRepositorySpec `json:"spec,omitempty"` + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} Status GitRepositoryStatus `json:"status,omitempty"` } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index aa19edfee..22e5dda58 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -17,9 +17,11 @@ limitations under the License. package v1beta1 import ( - "github.com/fluxcd/pkg/apis/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" ) // HelmChartKind is the string representation of a HelmChart. @@ -45,16 +47,47 @@ type HelmChartSpec struct { // +required Interval metav1.Duration `json:"interval"` - // Alternative values file to use as the default chart values, expected to be a - // relative path in the SourceRef. Ignored when omitted. + // Determines what enables the creation of a new artifact. Valid values are + // ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // Alternative list of values files to use as the chart values (values.yaml + // is not included by default), expected to be a relative path in the SourceRef. + // Values files are merged in the order of this list with the last file overriding + // the first. Ignored when omitted. 
// +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // Alternative values file to use as the default chart values, expected to + // be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, + // for backwards compatibility the file defined here is merged before the + // ValuesFiles items. Ignored when omitted. + // +optional + // +deprecated ValuesFile string `json:"valuesFile,omitempty"` // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. + ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. + ReconcileStrategyRevision string = "Revision" +) + // LocalHelmChartSourceReference contains enough information to let you locate // the typed referenced object at namespace level. 
type LocalHelmChartSourceReference struct { @@ -119,7 +152,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -129,7 +168,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -137,7 +182,13 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -168,24 +219,29 @@ func (in *HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetValuesFiles returns a merged list of ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + valuesFiles := in.Spec.ValuesFiles + + // Prepend the deprecated ValuesFile to the list + if in.Spec.ValuesFile != "" { + valuesFiles = append([]string{in.Spec.ValuesFile}, valuesFiles...) + } + return valuesFiles +} + // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` -// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` -// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` -// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:resource:shortName=hc +// +kubebuilder:skipversion // HelmChart is the Schema for the helmcharts API type HelmChart struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec HelmChartSpec `json:"spec,omitempty"` + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} Status HelmChartStatus `json:"status,omitempty"` } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 5e32d4424..4530b82a9 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -17,9 +17,11 @@ limitations under the License. 
package v1beta1 import ( - "github.com/fluxcd/pkg/apis/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" ) const ( @@ -41,10 +43,19 @@ type HelmRepositorySpec struct { // For HTTP/S basic auth the secret must contain username and // password fields. // For TLS the secret must contain a certFile and keyFile, and/or - // caCert fields. + // caFile fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // PassCredentials allows the credentials from the SecretRef to be passed on to + // a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the index + // differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result in + // credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + // The interval at which to check the upstream for updates. // +required Interval metav1.Duration `json:"interval"` @@ -57,6 +68,10 @@ type HelmRepositorySpec struct { // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } // HelmRepositoryStatus defines the observed state of the HelmRepository. 
@@ -98,7 +113,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -108,7 +129,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -116,7 +143,13 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. 
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -148,20 +181,17 @@ func (in *HelmRepository) GetInterval() metav1.Duration { } // +genclient -// +genclient:Namespaced // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:skipversion // HelmRepository is the Schema for the helmrepositories API type HelmRepository struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec HelmRepositorySpec `json:"spec,omitempty"` + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} Status HelmRepositoryStatus `json:"status,omitempty"` } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 24e929f38..10be7301e 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated /* -Copyright 2020 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the 
License. @@ -21,6 +21,7 @@ limitations under the License. package v1beta1 import ( + "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -120,6 +121,11 @@ func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = new(string) **out = **in } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. @@ -187,6 +193,22 @@ func (in *GitRepository) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. +func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { *out = *in @@ -263,6 +285,16 @@ func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { *out = new(string) **out = **in } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. 
@@ -290,6 +322,17 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { *out = new(Artifact) (*in).DeepCopyInto(*out) } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + } + } out.ReconcileRequestStatus = in.ReconcileRequestStatus } @@ -324,7 +367,7 @@ func (in *HelmChart) DeepCopyInto(out *HelmChart) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -383,6 +426,16 @@ func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { *out = *in out.SourceRef = in.SourceRef out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. @@ -496,6 +549,11 @@ func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { *out = new(v1.Duration) **out = **in } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go new file mode 100644 index 000000000..cc88d2a0c --- /dev/null +++ b/api/v1beta2/artifact_types.go @@ -0,0 +1,159 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "path" + "regexp" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Artifact represents the output of a Source reconciliation. +// +// Deprecated: use Artifact from api/v1 instead. This type will be removed in +// a future release. +type Artifact struct { + // Path is the relative file path of the Artifact. It can be used to locate + // the file in the root of the Artifact storage on the local file system of + // the controller managing the Source. + // +required + Path string `json:"path"` + + // URL is the HTTP address of the Artifact as exposed by the controller + // managing the Source. It can be used to retrieve the Artifact for + // consumption, e.g. by another controller applying the Artifact contents. + // +required + URL string `json:"url"` + + // Revision is a human-readable identifier traceable in the origin source + // system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + // +optional + Revision string `json:"revision"` + + // Checksum is the SHA256 checksum of the Artifact file. + // Deprecated: use Artifact.Digest instead. + // +optional + Checksum string `json:"checksum,omitempty"` + + // Digest is the digest of the file in the form of ':'. + // +optional + // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$" + Digest string `json:"digest,omitempty"` + + // LastUpdateTime is the timestamp corresponding to the last update of the + // Artifact. 
+ // +required + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + + // Size is the number of bytes in the file. + // +optional + Size *int64 `json:"size,omitempty"` + + // Metadata holds upstream information such as OCI annotations. + // +optional + Metadata map[string]string `json:"metadata,omitempty"` +} + +// HasRevision returns if the given revision matches the current Revision of +// the Artifact. +func (in *Artifact) HasRevision(revision string) bool { + if in == nil { + return false + } + return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision) +} + +// HasChecksum returns if the given checksum matches the current Checksum of +// the Artifact. +func (in *Artifact) HasChecksum(checksum string) bool { + if in == nil { + return false + } + return in.Checksum == checksum +} + +// ArtifactDir returns the artifact dir path in the form of +// '//'. +func ArtifactDir(kind, namespace, name string) string { + kind = strings.ToLower(kind) + return path.Join(kind, namespace, name) +} + +// ArtifactPath returns the artifact path in the form of +// '//name>/'. +func ArtifactPath(kind, namespace, name, filename string) string { + return path.Join(ArtifactDir(kind, namespace, name), filename) +} + +// TransformLegacyRevision transforms a "legacy" revision string into a "new" +// revision string. 
It accepts the following formats: +// +// - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc +// - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd +// +// Which are transformed into the following formats respectively: +// +// - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738 +// - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc +// - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd +// +// Deprecated, this function exists for backwards compatibility with existing +// resources, and to provide a transition period. Will be removed in a future +// release. +func TransformLegacyRevision(rev string) string { + if rev != "" && strings.LastIndex(rev, ":") == -1 { + if i := strings.LastIndex(rev, "/"); i >= 0 { + sha := rev[i+1:] + if algo := determineSHAType(sha); algo != "" { + if name := rev[:i]; name != "HEAD" { + return name + "@" + algo + ":" + sha + } + return algo + ":" + sha + } + } + if algo := determineSHAType(rev); algo != "" { + return algo + ":" + rev + } + } + return rev +} + +// isAlphaNumHex returns true if the given string only contains 0-9 and a-f +// characters. +var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString + +// determineSHAType returns the SHA algorithm used to compute the provided hex. +// The determination is heuristic and based on the length of the hex string. If +// the size is not recognized, an empty string is returned. 
+func determineSHAType(hex string) string { + if isAlphaNumHex(hex) { + switch len(hex) { + case 40: + return "sha1" + case 64: + return "sha256" + } + } + return "" +} diff --git a/api/v1beta2/artifact_types_test.go b/api/v1beta2/artifact_types_test.go new file mode 100644 index 000000000..ccf578de3 --- /dev/null +++ b/api/v1beta2/artifact_types_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import "testing" + +func TestTransformLegacyRevision(t *testing.T) { + tests := []struct { + rev string + want string + }{ + { + rev: "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738", + }, + { + rev: "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c", + want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c", + }, + { + rev: "v1.0.0", + want: "v1.0.0", + }, + { + rev: 
"v1.0.0-rc1", + want: "v1.0.0-rc1", + }, + { + rev: "v1.0.0-rc1+metadata", + want: "v1.0.0-rc1+metadata", + }, + { + rev: "arbitrary/revision", + want: "arbitrary/revision", + }, + { + rev: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx", + want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx", + }, + } + for _, tt := range tests { + t.Run(tt.rev, func(t *testing.T) { + if got := TransformLegacyRevision(tt.rev); got != tt.want { + t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go new file mode 100644 index 000000000..6495abdd0 --- /dev/null +++ b/api/v1beta2/bucket_types.go @@ -0,0 +1,301 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + + apiv1 "github.com/fluxcd/source-controller/api/v1" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = apiv1.BucketProviderGeneric + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. 
+ BucketProviderAmazon string = apiv1.BucketProviderAmazon + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = apiv1.BucketProviderGoogle + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = apiv1.BucketProviderAzure + + // GenericBucketProvider for any S3 API compatible storage Bucket. + // + // Deprecated: use BucketProviderGeneric. + GenericBucketProvider string = apiv1.BucketProviderGeneric + // AmazonBucketProvider for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. + // + // Deprecated: use BucketProviderAmazon. + AmazonBucketProvider string = apiv1.BucketProviderAmazon + // GoogleBucketProvider for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + // + // Deprecated: use BucketProviderGoogle. + GoogleBucketProvider string = apiv1.BucketProviderGoogle + // AzureBucketProvider for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + // + // Deprecated: use BucketProviderAzure. + AzureBucketProvider string = apiv1.BucketProviderAzure +) + +// BucketSpec specifies the required configuration to produce an Artifact for +// an object storage bucket. 
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +type BucketSpec struct { + // Provider of the object storage bucket. + // Defaults to 'generic', which expects an S3 (API) compatible object + // storage. + // +kubebuilder:validation:Enum=generic;aws;gcp;azure + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // BucketName is the name of the object storage bucket. + // +required + BucketName string `json:"bucketName"` + + // Endpoint is the object storage address the BucketName is located at. + // +required + Endpoint string `json:"endpoint"` + + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP Endpoint. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Region of the Endpoint where the BucketName is located in. + // +optional + Region string `json:"region,omitempty"` + + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + + // SecretRef specifies the Secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for fetch operations, defaults to 60s. 
+ // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // Bucket. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. + // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + +// BucketStatus records the observed state of a Bucket. +type BucketStatus struct { + // ObservedGeneration is the last observed generation of the Bucket object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful Bucket reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason signals that the Bucket listing and fetch + // operations succeeded. 
+ BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason signals that the Bucket listing or fetch + // operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1" +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// Bucket is the Schema for the buckets API. +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// BucketList contains a list of Bucket objects. 
+// +kubebuilder:object:root=true +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go new file mode 100644 index 000000000..2b93a1795 --- /dev/null +++ b/api/v1beta2/condition_types.go @@ -0,0 +1,107 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +const SourceFinalizer = "finalizers.fluxcd.io" + +const ( + // ArtifactInStorageCondition indicates the availability of the Artifact in + // the storage. + // If True, the Artifact is stored successfully. + // This Condition is only present on the resource if the Artifact is + // successfully stored. + ArtifactInStorageCondition string = "ArtifactInStorage" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source + // is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // SourceVerifiedCondition indicates the integrity verification of the + // Source. + // If True, the integrity check succeeded. If False, it failed. + // This Condition is only present on the resource if the integrity check + // is enabled. 
+ SourceVerifiedCondition string = "SourceVerified" + + // FetchFailedCondition indicates a transient or persistent fetch failure + // of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, + // and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" + + // BuildFailedCondition indicates a transient or persistent build failure + // of a Source's Artifact. + // If True, the Source can be in an ArtifactOutdatedCondition. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + BuildFailedCondition string = "BuildFailed" + + // StorageOperationFailedCondition indicates a transient or persistent + // failure related to storage. If True, the reconciliation failed while + // performing some filesystem operation. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. + StorageOperationFailedCondition string = "StorageOperationFailed" +) + +// Reasons are provided as utility, and not part of the declarative API. +const ( + // URLInvalidReason signals that a given Source has an invalid URL. + URLInvalidReason string = "URLInvalid" + + // AuthenticationFailedReason signals that a Secret does not have the + // required fields, or the provided credentials do not match. + AuthenticationFailedReason string = "AuthenticationFailed" + + // VerificationError signals that the Source's verification + // check failed. + VerificationError string = "VerificationError" + + // DirCreationFailedReason signals a failure caused by a directory creation + // operation. + DirCreationFailedReason string = "DirectoryCreationFailed" + + // StatOperationFailedReason signals a failure caused by a stat operation on + // a path. 
+ StatOperationFailedReason string = "StatOperationFailed" + + // ReadOperationFailedReason signals a failure caused by a read operation. + ReadOperationFailedReason string = "ReadOperationFailed" + + // AcquireLockFailedReason signals a failure in acquiring lock. + AcquireLockFailedReason string = "AcquireLockFailed" + + // InvalidPathReason signals a failure caused by an invalid path. + InvalidPathReason string = "InvalidPath" + + // ArchiveOperationFailedReason signals a failure in archive operation. + ArchiveOperationFailedReason string = "ArchiveOperationFailed" + + // SymlinkUpdateFailedReason signals a failure in updating a symlink. + SymlinkUpdateFailedReason string = "SymlinkUpdateFailed" + + // ArtifactUpToDateReason signals that an existing Artifact is up-to-date + // with the Source. + ArtifactUpToDateReason string = "ArtifactUpToDate" + + // CacheOperationFailedReason signals a failure in cache operation. + CacheOperationFailedReason string = "CacheOperationFailed" +) diff --git a/internal/util/util.go b/api/v1beta2/doc.go similarity index 59% rename from internal/util/util.go rename to api/v1beta2/doc.go index 4d9e19adf..e9fca1650 100644 --- a/internal/util/util.go +++ b/api/v1beta2/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// ObjectKey returns client.ObjectKey for the object. 
-func ObjectKey(object metav1.Object) client.ObjectKey { - return client.ObjectKey{ - Namespace: object.GetNamespace(), - Name: object.GetName(), - } -} +// Package v1beta2 contains API Schema definitions for the source v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=source.toolkit.fluxcd.io +package v1beta2 diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go new file mode 100644 index 000000000..89beeb9a7 --- /dev/null +++ b/api/v1beta2/gitrepository_types.go @@ -0,0 +1,317 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // GitRepositoryKind is the string representation of a GitRepository. + GitRepositoryKind = "GitRepository" + + // GoGitImplementation for performing Git operations using go-git. + GoGitImplementation = "go-git" + // LibGit2Implementation for performing Git operations using libgit2. + LibGit2Implementation = "libgit2" +) + +const ( + // IncludeUnavailableCondition indicates one of the includes is not + // available. For example, because it does not exist, or does not have an + // Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only + // present on the resource if it is True. 
+ IncludeUnavailableCondition string = "IncludeUnavailable" +) + +// GitRepositorySpec specifies the required configuration to produce an +// Artifact for a Git repository. +type GitRepositorySpec struct { + // URL specifies the Git repository URL, it can be an HTTP/S or SSH address. + // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials for + // the GitRepository. + // For HTTPS repositories the Secret must contain 'username' and 'password' + // fields for basic auth or 'bearerToken' field for token auth. + // For SSH repositories the Secret must contain 'identity' + // and 'known_hosts' fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Interval at which to check the GitRepository for updates. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for Git operations like cloning, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Reference specifies the Git reference to resolve and monitor for + // changes, defaults to the 'master' branch. + // +optional + Reference *GitRepositoryRef `json:"ref,omitempty"` + + // Verification specifies the configuration to verify the Git commit + // signature(s). + // +optional + Verification *GitRepositoryVerification `json:"verify,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. 
+ // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // GitRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // GitImplementation specifies which Git client library implementation to + // use. Defaults to 'go-git', valid values are ('go-git', 'libgit2'). + // Deprecated: gitImplementation is deprecated now that 'go-git' is the + // only supported implementation. + // +kubebuilder:validation:Enum=go-git;libgit2 + // +kubebuilder:default:=go-git + // +optional + GitImplementation string `json:"gitImplementation,omitempty"` + + // RecurseSubmodules enables the initialization of all submodules within + // the GitRepository as cloned from the URL, using their default settings. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Include specifies a list of GitRepository resources which Artifacts + // should be included in the Artifact produced for this GitRepository. + Include []GitRepositoryInclude `json:"include,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// GitRepositoryInclude specifies a local reference to a GitRepository which +// Artifact (sub-)contents must be included, and where they should be placed. +type GitRepositoryInclude struct { + // GitRepositoryRef specifies the GitRepository which Artifact contents + // must be included. + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // FromPath specifies the path to copy contents from, defaults to the root + // of the Artifact. + // +optional + FromPath string `json:"fromPath"` + + // ToPath specifies the path to copy contents to, defaults to the name of + // the GitRepositoryRef. 
+ // +optional + ToPath string `json:"toPath"` +} + +// GetFromPath returns the specified FromPath. +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +// GetToPath returns the specified ToPath, falling back to the name of the +// GitRepositoryRef. +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryRef specifies the Git reference to resolve and checkout. +type GitRepositoryRef struct { + // Branch to check out, defaults to 'master' if no other field is defined. + // +optional + Branch string `json:"branch,omitempty"` + + // Tag to check out, takes precedence over Branch. + // +optional + Tag string `json:"tag,omitempty"` + + // SemVer tag expression to check out, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + // + // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" + // +optional + Name string `json:"name,omitempty"` + + // Commit SHA to check out, takes precedence over all reference fields. + // + // This can be combined with Branch to shallow clone the branch, in which + // the commit is expected to exist. + // +optional + Commit string `json:"commit,omitempty"` +} + +// GitRepositoryVerification specifies the Git commit signature verification +// strategy. +type GitRepositoryVerification struct { + // Mode specifies what Git object should be verified, currently ('head'). + // +kubebuilder:validation:Enum=head + Mode string `json:"mode"` + + // SecretRef specifies the Secret containing the public keys of trusted Git + // authors. 
+ SecretRef meta.LocalObjectReference `json:"secretRef"` +} + +// GitRepositoryStatus records the observed state of a Git repository. +type GitRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the GitRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the GitRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // GitRepositoryStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful GitRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // IncludedArtifacts contains a list of the last successfully included + // Artifacts as instructed by GitRepositorySpec.Include. + // +optional + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` + + // ContentConfigChecksum is a checksum of all the configurations related to + // the content of the source artifact: + // - .spec.ignore + // - .spec.recurseSubmodules + // - .spec.included and the checksum of the included artifacts + // observed in .status.observedGeneration version of the object. This can + // be used to determine if the content of the included repository has + // changed. + // It has the format of `:`, for example: `sha256:`. + // + // Deprecated: Replaced with explicit fields for observed artifact content + // config in the status. + // +optional + ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. 
+ // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedRecurseSubmodules is the observed resource submodules + // configuration used to produce the current Artifact. + // +optional + ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"` + + // ObservedInclude is the observed list of GitRepository resources used to + // to produce the current Artifact. + // +optional + ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // GitOperationSucceedReason signals that a Git operation (e.g. clone, + // checkout, etc.) succeeded. + GitOperationSucceedReason string = "GitOperationSucceeded" + + // GitOperationFailedReason signals that a Git operation (e.g. clone, + // checkout, etc.) failed. + GitOperationFailedReason string = "GitOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the GitRepository must be +// reconciled again. +func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the GitRepository if present in +// the status sub-resource. 
+func (in *GitRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=gitrepo +// +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1" +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// GitRepository is the Schema for the gitrepositories API. +type GitRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status GitRepositoryStatus `json:"status,omitempty"` +} + +// GitRepositoryList contains a list of GitRepository objects. +// +kubebuilder:object:root=true +type GitRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GitRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{}) +} diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go new file mode 100644 index 000000000..797e6c536 --- /dev/null +++ b/api/v1beta2/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go new file mode 100644 index 000000000..ac24b1c13 --- /dev/null +++ b/api/v1beta2/helmchart_types.go @@ -0,0 +1,250 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + + apiv1 "github.com/fluxcd/source-controller/api/v1" +) + +// HelmChartKind is the string representation of a HelmChart. +const HelmChartKind = "HelmChart" + +// HelmChartSpec specifies the desired state of a Helm chart. +type HelmChartSpec struct { + // Chart is the name or path the Helm chart is available at in the + // SourceRef. + // +required + Chart string `json:"chart"` + + // Version is the chart version semver expression, ignored for charts from + // GitRepository and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // SourceRef is the reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // Interval at which the HelmChart SourceRef is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // ReconcileStrategy determines what enables the creation of a new artifact. + // Valid values are ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // ValuesFiles is an alternative list of values files to use as the chart + // values (values.yaml is not included by default), expected to be a + // relative path in the SourceRef. 
+ // Values files are merged in the order of this list with the last file + // overriding the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // ValuesFile is an alternative values file to use as the default chart + // values, expected to be a relative path in the SourceRef. Deprecated in + // favor of ValuesFiles, for backwards compatibility the file specified here + // is merged before the ValuesFiles items. Ignored when omitted. + // +optional + // +deprecated + ValuesFile string `json:"valuesFile,omitempty"` + + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // This field is only supported when using HelmRepository source with spec.type 'oci'. + // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + // +optional + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. + ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. 
+ ReconcileStrategyRevision string = "Revision" +) + +// LocalHelmChartSourceReference contains enough information to let you locate +// the typed referenced object at namespace level. +type LocalHelmChartSourceReference struct { + // APIVersion of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + // 'Bucket'). + // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// HelmChartStatus records the observed state of the HelmChart. +type HelmChartStatus struct { + // ObservedGeneration is the last observed generation of the HelmChart + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // ObservedSourceArtifactRevision is the last observed Artifact.Revision + // of the HelmChartSpec.SourceRef. + // +optional + ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"` + + // ObservedChartName is the last observed chart name as specified by the + // resolved chart reference. + // +optional + ObservedChartName string `json:"observedChartName,omitempty"` + + // ObservedValuesFiles are the observed value files of the last successful + // reconciliation. + // It matches the chart in the last successfully reconciled artifact. + // +optional + ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"` + + // Conditions holds the conditions for the HelmChart. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful reconciliation. 
+ // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullSucceededReason signals that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageSucceededReason signals that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmChart) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles. +func (in *HelmChart) GetValuesFiles() []string { + valuesFiles := in.Spec.ValuesFiles + + // Prepend the deprecated ValuesFile to the list + if in.Spec.ValuesFile != "" { + valuesFiles = append([]string{in.Spec.ValuesFile}, valuesFiles...) 
+ } + return valuesFiles +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1" +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmChart is the Schema for the helmcharts API. +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// HelmChartList contains a list of HelmChart objects. +// +kubebuilder:object:root=true +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, &HelmChartList{}) +} diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go new file mode 100644 index 000000000..56cbd928c --- /dev/null +++ b/api/v1beta2/helmrepository_types.go @@ -0,0 +1,228 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository + // objects by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" + // HelmRepositoryTypeDefault is the default HelmRepository type. + // It is used when no type is specified and corresponds to a Helm repository. + HelmRepositoryTypeDefault = "default" + // HelmRepositoryTypeOCI is the type for an OCI repository. + HelmRepositoryTypeOCI = "oci" +) + +// HelmRepositorySpec specifies the required configuration to produce an +// Artifact for a Helm repository index YAML. +type HelmRepositorySpec struct { + // URL of the Helm repository, a valid URL contains at least a protocol and + // host. + // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials + // for the HelmRepository. + // For HTTP/S basic auth the secret must contain 'username' and 'password' + // fields. + // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + // keys is deprecated. Please use `.spec.certSecretRef` instead. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // It takes precedence over the values specified in the Secret referred + // to by `.spec.secretRef`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed + // on to a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the + // index differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result + // in credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // Interval at which the HelmRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval metav1.Duration `json:"interval,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // This field is only taken into account if the .spec.type field is set to 'oci'. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Timeout is used for the index fetch operation for an HTTPS helm repository, + // and for remote OCI Repository operations like pulling for an OCI helm + // chart by the associated HelmChart. + // Its default value is 60s. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // HelmRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Type of the HelmRepository. + // When this field is set to "oci", the URL field value must be prefixed with "oci://". + // +kubebuilder:validation:Enum=default;oci + // +optional + Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` +} + +// HelmRepositoryStatus records the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the HelmRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. 
+ // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // HelmRepositoryStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful HelmRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason signals that the HelmRepository index fetch + // failed. + IndexationFailedReason string = "IndexationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + if in.Spec.Interval.Duration != 0 { + return in.Spec.Interval.Duration + } + return time.Minute +} + +// GetTimeout returns the timeout duration used for various operations related +// to this HelmRepository. +func (in HelmRepository) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return time.Minute +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. 
+func (in *HelmRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1" +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmRepository is the Schema for the helmrepositories API. +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository objects. +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1beta2/ocirepository_types.go b/api/v1beta2/ocirepository_types.go new file mode 100644 index 000000000..760f0d8f1 --- /dev/null +++ b/api/v1beta2/ocirepository_types.go @@ -0,0 +1,315 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" + + apiv1 "github.com/fluxcd/source-controller/api/v1" +) + +const ( + // OCIRepositoryKind is the string representation of a OCIRepository. + OCIRepositoryKind = "OCIRepository" + + // OCIRepositoryPrefix is the prefix used for OCIRepository URLs. + OCIRepositoryPrefix = "oci://" + + // GenericOCIProvider provides support for authentication using static credentials + // for any OCI compatible API such as Docker Registry, GitHub Container Registry, + // Docker Hub, Quay, etc. + GenericOCIProvider string = "generic" + + // AmazonOCIProvider provides support for OCI authentication using AWS IRSA. + AmazonOCIProvider string = "aws" + + // GoogleOCIProvider provides support for OCI authentication using GCP workload identity. + GoogleOCIProvider string = "gcp" + + // AzureOCIProvider provides support for OCI authentication using a Azure Service Principal, + // Managed Identity or Shared Key. + AzureOCIProvider string = "azure" + + // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer. + OCILayerExtract = "extract" + + // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer. + OCILayerCopy = "copy" +) + +// OCIRepositorySpec defines the desired state of OCIRepository +type OCIRepositorySpec struct { + // URL is a reference to an OCI artifact repository hosted + // on a remote container registry. 
+ // +kubebuilder:validation:Pattern="^oci://.*$" + // +required + URL string `json:"url"` + + // The OCI reference to pull and monitor for changes, + // defaults to the latest tag. + // +optional + Reference *OCIRepositoryRef `json:"ref,omitempty"` + + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // SecretRef contains the secret name containing the registry login + // credentials to resolve image metadata. + // The secret must be of type kubernetes.io/dockerconfigjson. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the image pull if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // Note: Support for the `caFile`, `certFile` and `keyFile` keys have + // been deprecated. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote OCI Repository operations like pulling, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). 
If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// OCIRepositoryRef defines the image reference for the OCIRepository's URL +type OCIRepositoryRef struct { + // Digest is the image digest to pull, takes precedence over SemVer. + // The value should be in the format 'sha256:'. + // +optional + Digest string `json:"digest,omitempty"` + + // SemVer is the range of tags to pull selecting the latest within + // the range, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + + // Tag is the image tag to pull, defaults to latest. + // +optional + Tag string `json:"tag,omitempty"` +} + +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` + + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. + // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. 
+ // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` +} + +// OCIRepositoryStatus defines the observed state of OCIRepository +type OCIRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the OCIRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last OCI Repository sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful OCI Repository sync. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ContentConfigChecksum is a checksum of all the configurations related to + // the content of the source artifact: + // - .spec.ignore + // - .spec.layerSelector + // observed in .status.observedGeneration version of the object. This can + // be used to determine if the content configuration has changed and the + // artifact needs to be rebuilt. + // It has the format of `:`, for example: `sha256:`. + // + // Deprecated: Replaced with explicit fields for observed artifact content + // config in the status. + // +optional + ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // OCIPullFailedReason signals that a pull operation failed. 
+ OCIPullFailedReason string = "OCIArtifactPullFailed" + + // OCILayerOperationFailedReason signals that an OCI layer operation failed. + OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in OCIRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *OCIRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the OCIRepository must be +// reconciled again. +func (in OCIRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the OCIRepository if present in +// the status sub-resource. +func (in *OCIRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). 
+func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ocirepo +// +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1" +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// OCIRepository is the Schema for the ocirepositories API +type OCIRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OCIRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status OCIRepositoryStatus `json:"status,omitempty"` +} + +// OCIRepositoryList contains a list of OCIRepository +// +kubebuilder:object:root=true +type OCIRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OCIRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{}) +} diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go new file mode 100644 index 000000000..4111c0998 --- /dev/null +++ b/api/v1beta2/source.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // SourceIndexKey is the key used for indexing objects based on their + // referenced Source. + SourceIndexKey string = ".metadata.source" +) + +// Source interface must be supported by all API types. +// Source is the interface that provides generic access to the Artifact and +// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io +// API group. +// +// Deprecated: use the Source interface from api/v1 instead. This type will be +// removed in a future release. +// +// +k8s:deepcopy-gen=false +type Source interface { + runtime.Object + // GetRequeueAfter returns the duration after which the source must be + // reconciled again. + GetRequeueAfter() time.Duration + // GetArtifact returns the latest artifact from the source if present in + // the status sub-resource. + GetArtifact() *Artifact +} diff --git a/api/v1beta2/sts_types.go b/api/v1beta2/sts_types.go new file mode 100644 index 000000000..c07c05123 --- /dev/null +++ b/api/v1beta2/sts_types.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +const ( + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" +) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..0b874dd7e --- /dev/null +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,876 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + apiv1 "github.com/fluxcd/source-controller/api/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Artifact) DeepCopyInto(out *Artifact) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(int64) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. +func (in *Artifact) DeepCopy() *Artifact { + if in == nil { + return nil + } + out := new(Artifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. 
+func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepository) DeepCopyInto(out *GitRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository. +func (in *GitRepository) DeepCopy() *GitRepository { + if in == nil { + return nil + } + out := new(GitRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. +func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GitRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList. +func (in *GitRepositoryList) DeepCopy() *GitRepositoryList { + if in == nil { + return nil + } + out := new(GitRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef. +func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef { + if in == nil { + return nil + } + out := new(GitRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(GitRepositoryRef) + **out = **in + } + if in.Verification != nil { + in, out := &in.Verification, &out.Verification + *out = new(GitRepositoryVerification) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. +func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec { + if in == nil { + return nil + } + out := new(GitRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*meta.Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + } + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedInclude != nil { + in, out := &in.ObservedInclude, &out.ObservedInclude + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus. +func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus { + if in == nil { + return nil + } + out := new(GitRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification. +func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { + if in == nil { + return nil + } + out := new(GitRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. +func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(apiv1.OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. +func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository. +func (in *OCIRepository) DeepCopy() *OCIRepository { + if in == nil { + return nil + } + out := new(OCIRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList. +func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList { + if in == nil { + return nil + } + out := new(OCIRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef. +func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef { + if in == nil { + return nil + } + out := new(OCIRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(OCIRepositoryRef) + **out = **in + } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(apiv1.OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec. +func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec { + if in == nil { + return nil + } + out := new(OCIRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus. +func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { + if in == nil { + return nil + } + out := new(OCIRepositoryStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index d3e59eb27..f578c8da0 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: buckets.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -17,161 +15,702 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .spec.url - name: URL + - jsonPath: .spec.endpoint + name: Endpoint type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message 
name: Status type: string + name: v1 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. + properties: + bucketName: + description: BucketName is the name of the object storage bucket. + type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: Endpoint is the object storage address the BucketName + is located at. 
+ type: string + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP Endpoint. + type: boolean + interval: + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. + type: string + provider: + default: generic + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. + enum: + - generic + - aws + - gcp + - azure + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + region: + description: Region of the Endpoint where the BucketName is located + in. + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the Bucket. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the bucket. This field is only supported for the 'gcp' and 'aws' providers. 
+ For more information about workload identity: + https://fluxcd.io/flux/components/source/buckets/#workload-identity + type: string + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object + suspend: + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. 
+ type: boolean + timeout: + default: 60s + description: Timeout for fetch operations, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + required: + - bucketName + - endpoint + - interval + type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' + - message: ServiceAccountName is not supported for the 'generic' Bucket + provider + rule: self.provider != 'generic' || !has(self.serviceAccountName) + - message: cannot set both .spec.secretRef and .spec.serviceAccountName + rule: '!has(self.secretRef) || !has(self.serviceAccountName)' + status: + default: + observedGeneration: -1 + description: BucketStatus records the observed state of a Bucket. + properties: + artifact: + description: Artifact represents the last successful Bucket reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. 
+ format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the Bucket. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation of + the Bucket object. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + url: + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.endpoint + name: Endpoint + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta1 + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + deprecated: true + deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1 + name: v1beta2 schema: openAPIV3Schema: - description: Bucket is the Schema for the buckets API + description: Bucket is the Schema for the buckets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BucketSpec defines the desired state of an S3 compatible - bucket + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object bucketName: - description: The bucket name. + description: BucketName is the name of the object storage bucket. type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. 
The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object endpoint: - description: The bucket endpoint address. + description: Endpoint is the object storage address the BucketName + is located at. type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string insecure: - description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. + description: Insecure allows connecting to a non-TLS HTTP Endpoint. type: boolean interval: - description: The interval at which to check for bucket updates. + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. type: string provider: default: generic - description: The S3 compatible storage provider name, default ('generic'). + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. 
enum: - generic - aws + - gcp + - azure type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object region: - description: The bucket region. + description: Region of the Endpoint where the BucketName is located + in. type: string secretRef: - description: The name of the secret containing authentication credentials + description: |- + SecretRef specifies the Secret containing authentication credentials for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. 
+ pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. type: boolean timeout: - default: 20s - description: The timeout for download operations, defaults to 20s. + default: 60s + description: Timeout for fetch operations, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string required: - bucketName - endpoint - interval type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' status: - description: BucketStatus defines the observed state 
of a bucket + default: + observedGeneration: -1 + description: BucketStatus records the observed state of a Bucket. properties: artifact: - description: Artifact represents the output of the last successful - Bucket sync. + description: Artifact represents the last successful Bucket reconciliation. properties: - checksum: - description: Checksum is the SHA1 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. 
type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. 
For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -186,10 +725,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -202,26 +737,30 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. 
+ description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: ObservedGeneration is the last observed generation of + the Bucket object. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string url: - description: URL is the download link for the artifact output of the - last Bucket sync. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml new file mode 100644 index 000000000..23cdf63c3 --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externalartifacts.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: ExternalArtifact + listKind: ExternalArtifactList + plural: externalartifacts + singular: externalartifact + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: 
.spec.sourceRef.name
+      name: Source
+      type: string
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ExternalArtifact is the Schema for the external artifacts API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: ExternalArtifactSpec defines the desired state of ExternalArtifact
+            properties:
+              sourceRef:
+                description: |-
+                  SourceRef points to the Kubernetes custom resource for
+                  which the artifact is generated.
+                properties:
+                  apiVersion:
+                    description: API version of the referent, if not specified the
+                      Kubernetes preferred version will be used.
+                    type: string
+                  kind:
+                    description: Kind of the referent.
+                    type: string
+                  name:
+                    description: Name of the referent.
+                    type: string
+                  namespace:
+                    description: Namespace of the referent, when not specified it
+                      acts as LocalObjectReference.
+                    type: string
+                required:
+                - kind
+                - name
+                type: object
+            type: object
+          status:
+            description: ExternalArtifactStatus defines the observed state of ExternalArtifact
+            properties:
+              artifact:
+                description: Artifact represents the output of an ExternalArtifact
+                  reconciliation.
+                properties:
+                  digest:
+                    description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the ExternalArtifact. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 75e8f1614..10663e473 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: gitrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -13,6 +11,8 @@ spec: kind: GitRepository listKind: GitRepositoryList plural: gitrepositories + shortNames: + - gitrepo singular: gitrepository scope: Namespaced versions: @@ -20,195 +20,772 @@ spec: - jsonPath: .spec.url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + name: v1 + schema: + openAPIV3Schema: + description: GitRepository is the Schema for the gitrepositories API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. + properties: + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + include: + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. + properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array + interval: + description: |- + Interval at which the GitRepository URL is checked for updates. 
+ This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + provider: + description: |- + Provider used for authentication, can be 'azure', 'github', 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - azure + - github + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Git server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + recurseSubmodules: + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. + type: boolean + ref: + description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. + properties: + branch: + description: Branch to check out, defaults to 'master' if no other + field is defined. + type: string + commit: + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. + type: string + name: + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" + type: string + semver: + description: SemVer tag expression to check out, takes precedence + over Tag. + type: string + tag: + description: Tag to check out, takes precedence over Branch. + type: string + type: object + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. 
+ For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to + authenticate to the GitRepository. This field is only supported for 'azure' provider. + type: string + sparseCheckout: + description: |- + SparseCheckout specifies a list of directories to checkout when cloning + the repository. If specified, only these directories are included in the + Artifact produced for this GitRepository. + items: + type: string + type: array + suspend: + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. + type: boolean + timeout: + default: 60s + description: Timeout for Git operations like cloning, defaults to + 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: URL specifies the Git repository URL, it can be an HTTP/S + or SSH address. + pattern: ^(http|https|ssh)://.*$ + type: string + verify: + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). + properties: + mode: + default: HEAD + description: |- + Mode specifies which Git object(s) should be verified. + + The variants "head" and "HEAD" both imply the same thing, i.e. verify + the commit that the HEAD of the Git repository points to. The variant + "head" solely exists to ensure backwards compatibility. + enum: + - head + - HEAD + - Tag + - TagAndHEAD + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + required: + - secretRef + type: object + required: + - interval + - url + type: object + x-kubernetes-validations: + - message: serviceAccountName can only be set when provider is 'azure' + rule: '!has(self.serviceAccountName) || (has(self.provider) && self.provider + == ''azure'')' + status: + default: + observedGeneration: -1 + description: GitRepositoryStatus records the observed state of a Git repository. + properties: + artifact: + description: Artifact represents the last successful GitRepository + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the GitRepository. 
+ items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+              includedArtifacts:
+                description: |-
+                  IncludedArtifacts contains a list of the last successfully included
+                  Artifacts as instructed by GitRepositorySpec.Include.
+                items:
+                  description: Artifact represents the output of a Source reconciliation.
+                  properties:
+                    digest:
+                      description: Digest is the digest of the file in the form of
+                        '<algorithm>:<checksum>'.
+                      pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
+                      type: string
+                    lastUpdateTime:
+                      description: |-
+                        LastUpdateTime is the timestamp corresponding to the last update of the
+                        Artifact.
+                      format: date-time
+                      type: string
+                    metadata:
+                      additionalProperties:
+                        type: string
+                      description: Metadata holds upstream information such as OCI
+                        annotations.
+                      type: object
+                    path:
+                      description: |-
+                        Path is the relative file path of the Artifact. It can be used to locate
+                        the file in the root of the Artifact storage on the local file system of
+                        the controller managing the Source.
+                      type: string
+                    revision:
+                      description: |-
+                        Revision is a human-readable identifier traceable in the origin source
+                        system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
+                      type: string
+                    size:
+                      description: Size is the number of bytes in the file.
+                      format: int64
+                      type: integer
+                    url:
+                      description: |-
+                        URL is the HTTP address of the Artifact as exposed by the controller
+                        managing the Source. It can be used to retrieve the Artifact for
+                        consumption, e.g. by another controller applying the Artifact contents.
+ type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedInclude: + description: |- + ObservedInclude is the observed list of GitRepository resources used to + produce the current Artifact. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. + properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array + observedRecurseSubmodules: + description: |- + ObservedRecurseSubmodules is the observed resource submodules + configuration used to produce the current Artifact. + type: boolean + observedSparseCheckout: + description: |- + ObservedSparseCheckout is the observed list of directories used to + produce the current Artifact. 
+ items: + type: string + type: array + sourceVerificationMode: + description: |- + SourceVerificationMode is the last used verification mode indicating + which Git object(s) have been verified. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta1 + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + deprecated: true + deprecationWarning: v1beta2 GitRepository is deprecated, upgrade to v1 + name: v1beta2 schema: openAPIV3Schema: - description: GitRepository is the Schema for the gitrepositories API + description: GitRepository is the Schema for the gitrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GitRepositorySpec defines the desired state of a Git repository. + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object gitImplementation: default: go-git - description: Determines which git client library to use. Defaults - to go-git, valid values are ('go-git', 'libgit2'). + description: |- + GitImplementation specifies which Git client library implementation to + use. Defaults to 'go-git', valid values are ('go-git', 'libgit2'). + Deprecated: gitImplementation is deprecated now that 'go-git' is the + only supported implementation. 
enum: - go-git - libgit2 type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string + include: + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. + properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array interval: - description: The interval at which to check for repository updates. + description: Interval at which to check the GitRepository for updates. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string + recurseSubmodules: + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. + type: boolean ref: - description: The Git reference to checkout and monitor for changes, - defaults to master branch. 
+ description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. properties: branch: - default: master - description: The Git branch to checkout, defaults to master. + description: Branch to check out, defaults to 'master' if no other + field is defined. type: string commit: - description: The Git commit SHA to checkout, if specified Tag - filters will be ignored. + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. + type: string + name: + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string semver: - description: The Git tag semver expression, takes precedence over - Tag. + description: SemVer tag expression to check out, takes precedence + over Tag. type: string tag: - description: The Git tag to checkout, takes precedence over Branch. + description: Tag to check out, takes precedence over Branch. type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS - repositories the secret must contain username and password fields. - For SSH repositories the secret must contain identity, identity.pub - and known_hosts fields. + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. + For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. 
type: string required: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. type: boolean timeout: - default: 20s - description: The timeout for remote Git operations like cloning, defaults - to 20s. + default: 60s + description: Timeout for Git operations like cloning, defaults to + 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string url: - description: The repository URL, can be a HTTP/S or SSH address. - pattern: ^(http|https|ssh):// + description: URL specifies the Git repository URL, it can be an HTTP/S + or SSH address. + pattern: ^(http|https|ssh)://.*$ type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points - to. + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). properties: mode: - description: Mode describes what git object should be verified, + description: Mode specifies what Git object should be verified, currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all - trusted Git authors. + description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object required: - mode + - secretRef type: object required: - interval - url type: object status: - description: GitRepositoryStatus defines the observed state of a Git repository. + default: + observedGeneration: -1 + description: GitRepositoryStatus records the observed state of a Git repository. properties: artifact: - description: Artifact represents the output of the last successful - repository sync. + description: Artifact represents the last successful GitRepository + reconciliation. 
                properties:
-                  checksum:
-                    description: Checksum is the SHA1 checksum of the artifact.
+                  digest:
+                    description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+                    pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
                     type: string
                   lastUpdateTime:
-                    description: LastUpdateTime is the timestamp corresponding to
-                      the last update of this artifact.
+                    description: |-
+                      LastUpdateTime is the timestamp corresponding to the last update of the
+                      Artifact.
                     format: date-time
                     type: string
+                  metadata:
+                    additionalProperties:
+                      type: string
+                    description: Metadata holds upstream information such as OCI annotations.
+                    type: object
                   path:
-                    description: Path is the relative file path of this artifact.
+                    description: |-
+                      Path is the relative file path of the Artifact. It can be used to locate
+                      the file in the root of the Artifact storage on the local file system of
+                      the controller managing the Source.
                     type: string
                   revision:
-                    description: Revision is a human readable identifier traceable
-                      in the origin source system. It can be a Git commit SHA, Git
-                      tag, a Helm index timestamp, a Helm chart version, etc.
+                    description: |-
+                      Revision is a human-readable identifier traceable in the origin source
+                      system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
                     type: string
+                  size:
+                    description: Size is the number of bytes in the file.
+                    format: int64
+                    type: integer
                   url:
-                    description: URL is the HTTP address of this artifact.
+                    description: |-
+                      URL is the HTTP address of the Artifact as exposed by the controller
+                      managing the Source. It can be used to retrieve the Artifact for
+                      consumption, e.g. by another controller applying the Artifact contents.
                     type: string
                   required:
+                  - digest
+                  - lastUpdateTime
                   - path
+                  - revision
                   - url
                   type: object
               conditions:
                 description: Conditions holds the conditions for the GitRepository.
                 items:
-                  description: "Condition contains details for one aspect of the current
--- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -223,10 +800,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -238,27 +811,139 @@ spec: - type type: object type: array + contentConfigChecksum: + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.recurseSubmodules + - .spec.included and the checksum of the included artifacts + observed in .status.observedGeneration version of the object. This can + be used to determine if the content of the included repository has + changed. 
+ It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. + type: string + includedArtifacts: + description: |- + IncludedArtifacts contains a list of the last successfully included + Artifacts as instructed by GitRepositorySpec.Include. + items: + description: Artifact represents the output of a Source reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of + ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI + annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. 
+ description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedInclude: + description: |- + ObservedInclude is the observed list of GitRepository resources used to + to produce the current Artifact. + items: + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. + properties: + fromPath: + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. + type: string + repository: + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. + type: string + required: + - repository + type: object + type: array + observedRecurseSubmodules: + description: |- + ObservedRecurseSubmodules is the observed resource submodules + configuration used to produce the current Artifact. + type: boolean url: - description: URL is the download link for the artifact output of the - last repository sync. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + GitRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index c7da4899b..0e57c72a5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: helmcharts.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -13,6 +11,8 @@ spec: kind: HelmChart listKind: HelmChartList plural: helmcharts + shortNames: + - hc singular: helmchart scope: Namespaced versions: @@ -29,51 +29,442 @@ spec: - jsonPath: .spec.sourceRef.name name: Source Name type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + name: v1 + schema: + openAPIV3Schema: + description: HelmChart is the Schema for the helmcharts API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HelmChartSpec specifies the desired state of a Helm chart. + properties: + chart: + description: |- + Chart is the name or path the Helm chart is available at in the + SourceRef. + type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean + interval: + description: |- + Interval at which the HelmChart SourceRef is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + reconcileStrategy: + default: ChartVersion + description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). + See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. + enum: + - ChartVersion + - Revision + type: string + sourceRef: + description: SourceRef is the reference to the Source the chart is + available at. + properties: + apiVersion: + description: APIVersion of the referent. + type: string + kind: + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). + enum: + - HelmRepository + - GitRepository + - Bucket + type: string + name: + description: Name of the referent. + type: string + required: + - kind + - name + type: object + suspend: + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. + type: boolean + valuesFiles: + description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. 
+ Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. + items: + type: string + type: array + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + required: + - provider + type: object + version: + default: '*' + description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. + type: string + required: + - chart + - interval + - sourceRef + type: object + status: + default: + observedGeneration: -1 + description: HelmChartStatus records the observed state of the HelmChart. + properties: + artifact: + description: Artifact represents the output of the last successful + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmChart. 
+ items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedChartName: + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. + type: string + observedGeneration: + description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. + format: int64 + type: integer + observedSourceArtifactRevision: + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the HelmChartSpec.SourceRef. + type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array + url: + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.chart + name: Chart + type: string + - jsonPath: .spec.version + name: Version + type: string + - jsonPath: .spec.sourceRef.kind + name: Source Kind + type: string + - jsonPath: .spec.sourceRef.name + name: Source Name + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta1 + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + deprecated: true + deprecationWarning: v1beta2 HelmChart is deprecated, upgrade to v1 + name: v1beta2 schema: openAPIV3Schema: - description: HelmChart is the Schema for the helmcharts API + description: HelmChart is the Schema for the helmcharts API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmChartSpec defines the desired state of a Helm chart. + description: HelmChartSpec specifies the desired state of a Helm chart. properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object chart: - description: The name or path the Helm chart is available at in the + description: |- + Chart is the name or path the Helm chart is available at in the SourceRef. type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean interval: - description: The interval at which to check the Source for updates. + description: |- + Interval at which the HelmChart SourceRef is checked for updates. 
+ This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + reconcileStrategy: + default: ChartVersion + description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). + See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. + enum: + - ChartVersion + - Revision type: string sourceRef: - description: The reference to the Source the chart is available at. + description: SourceRef is the reference to the Source the chart is + available at. properties: apiVersion: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', - 'GitRepository', 'Bucket'). + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). enum: - HelmRepository - GitRepository @@ -87,17 +478,90 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. type: boolean valuesFile: - description: Alternative values file to use as the default chart values, - expected to be a relative path in the SourceRef. Ignored when omitted. + description: |- + ValuesFile is an alternative values file to use as the default chart + values, expected to be a relative path in the SourceRef. Deprecated in + favor of ValuesFiles, for backwards compatibility the file specified here + is merged before the ValuesFiles items. Ignored when omitted. type: string + valuesFiles: + description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. 
+ Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. + items: + type: string + type: array + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + required: + - provider + type: object version: default: '*' - description: The chart version semver expression, ignored for charts - from GitRepository and Bucket sources. Defaults to latest when omitted. + description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -105,76 +569,89 @@ spec: - sourceRef type: object status: - description: HelmChartStatus defines the observed state of the HelmChart. + default: + observedGeneration: -1 + description: HelmChartStatus records the observed state of the HelmChart. properties: artifact: description: Artifact represents the output of the last successful - chart sync. + reconciliation. properties: - checksum: - description: Checksum is the SHA1 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. 
+ description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -189,10 +666,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -205,25 +678,44 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedChartName: + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. format: int64 type: integer + observedSourceArtifactRevision: + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the HelmChartSpec.SourceRef. + type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array url: - description: URL is the download link for the last chart pulled. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index a6b38b95d..750a36500 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: helmrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -13,6 +11,8 @@ spec: kind: HelmRepository listKind: HelmRepositoryList plural: helmrepositories + shortNames: + - helmrepo singular: helmrepository scope: Namespaced versions: @@ -20,137 +20,561 @@ spec: - jsonPath: .spec.url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + name: v1 + schema: + openAPIV3Schema: + description: HelmRepository is the Schema for the helmrepositories API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. + properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. 
+ + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + insecure: + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. + type: boolean + interval: + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + passCredentials: + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. + This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. + Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. + type: boolean + provider: + default: generic + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + suspend: + description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. 
+ type: boolean + timeout: + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. + Its default value is 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + type: + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". + enum: + - default + - oci + type: string + url: + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. + pattern: ^(http|https|oci)://.*$ + type: string + required: + - url + type: object + status: + default: + observedGeneration: -1 + description: HelmRepositoryStatus records the observed state of the HelmRepository. + properties: + artifact: + description: Artifact represents the last successful HelmRepository + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. 
+ format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. + format: int64 + type: integer + url: + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta1 + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + deprecated: true + deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1 + name: v1beta2 schema: openAPIV3Schema: - description: HelmRepository is the Schema for the helmrepositories API + description: HelmRepository is the Schema for the helmrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmRepositorySpec defines the reference to a Helm repository. + description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. 
+ properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + insecure: + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. + type: boolean interval: - description: The interval at which to check the upstream for updates. + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + passCredentials: + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. + This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. 
+ Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. + type: boolean + provider: + default: generic + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp type: string secretRef: - description: The name of the secret containing authentication credentials - for the Helm repository. For HTTP/S basic auth the secret must contain - username and password fields. For TLS the secret must contain a - certFile and keyFile, and/or caCert fields. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. type: boolean timeout: - default: 60s - description: The timeout of index downloading, defaults to 60s. + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. + Its default value is 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + type: + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". 
+ enum: + - default + - oci type: string url: - description: The Helm repository URL, a valid URL contains at least - a protocol and host. + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. + pattern: ^(http|https|oci)://.*$ type: string required: - - interval - url type: object status: - description: HelmRepositoryStatus defines the observed state of the HelmRepository. + default: + observedGeneration: -1 + description: HelmRepositoryStatus records the observed state of the HelmRepository. properties: artifact: - description: Artifact represents the output of the last successful - repository sync. + description: Artifact represents the last successful HelmRepository + reconciliation. properties: - checksum: - description: Checksum is the SHA1 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. 
It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -165,10 +589,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -181,25 +601,26 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. format: int64 type: integer url: - description: URL is the download link for the last index fetched. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml new file mode 100644 index 000000000..05b7b96ab --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml @@ -0,0 +1,823 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: ocirepositories.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: OCIRepository + listKind: OCIRepositoryList + plural: ocirepositories + shortNames: + - ocirepo + singular: ocirepository + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: OCIRepository is the Schema for the ocirepositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OCIRepositorySpec defines the desired state of OCIRepository + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean + interval: + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. 
+ type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + provider: + default: generic + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ref: + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. + properties: + digest: + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. + type: string + semver: + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. + type: string + tag: + description: Tag is the image tag to pull, defaults to latest. + type: string + type: object + secretRef: + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + type: string + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote OCI Repository operations like + pulling, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: |- + URL is a reference to an OCI artifact repository hosted + on a remote container registry. + pattern: ^oci://.*$ + type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. 
+ type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: OCIRepositoryStatus defines the observed state of OCIRepository + properties: + artifact: + description: Artifact represents the output of the last successful + OCI Repository sync. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. 
by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the OCIRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + url: + description: URL is the download link for the artifact output of the + last OCI Repository sync. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1 + name: v1beta2 + schema: + openAPIV3Schema: + description: OCIRepository is the Schema for the ocirepositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OCIRepositorySpec defines the desired state of OCIRepository + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. 
The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + Note: Support for the `caFile`, `certFile` and `keyFile` keys have + been deprecated. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean + interval: + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + provider: + default: generic + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. 
+ enum: + - generic + - aws + - azure + - gcp + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ref: + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. + properties: + digest: + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. + type: string + semver: + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. + type: string + tag: + description: Tag is the image tag to pull, defaults to latest. + type: string + type: object + secretRef: + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + type: string + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote OCI Repository operations like + pulling, defaults to 60s. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: |- + URL is a reference to an OCI artifact repository hosted + on a remote container registry. + pattern: ^oci://.*$ + type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + required: + - provider + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: OCIRepositoryStatus defines the observed state of OCIRepository + properties: + artifact: + description: Artifact represents the output of the last successful + OCI Repository sync. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the OCIRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + contentConfigChecksum: + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.layerSelector + observed in .status.observedGeneration version of the object. This can + be used to determine if the content configuration has changed and the + artifact needs to be rebuilt. + It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. + type: string + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. 
+ enum: + - extract + - copy + type: string + type: object + url: + description: URL is the download link for the artifact output of the + last OCI Repository sync. + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index a666a9259..2a09dbfd5 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,4 +5,6 @@ resources: - bases/source.toolkit.fluxcd.io_helmrepositories.yaml - bases/source.toolkit.fluxcd.io_helmcharts.yaml - bases/source.toolkit.fluxcd.io_buckets.yaml +- bases/source.toolkit.fluxcd.io_ocirepositories.yaml +- bases/source.toolkit.fluxcd.io_externalartifacts.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/deployment.yaml b/config/manager/deployment.yaml index 50d3f4d50..e354b00e3 100644 --- a/config/manager/deployment.yaml +++ b/config/manager/deployment.yaml @@ -9,6 +9,8 @@ spec: matchLabels: app: source-controller replicas: 1 + strategy: + type: Recreate template: metadata: labels: @@ -18,6 +20,10 @@ spec: prometheus.io/port: "8080" spec: terminationGracePeriodSeconds: 10 + securityContext: + # Required for AWS IAM Role bindings + # https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html + fsGroup: 1337 containers: - name: manager image: fluxcd/source-controller @@ -25,16 +31,28 @@ spec: securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: [ "ALL" ] + seccompProfile: + type: RuntimeDefault ports: - containerPort: 9090 name: http + protocol: TCP - containerPort: 8080 name: http-prom + protocol: TCP + - containerPort: 9440 + name: healthz + protocol: TCP env: - name: RUNTIME_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace + - name: TUF_ROOT # store the Fulcio root CA file in tmp + value: "/tmp/.sigstore" args: - --watch-all-namespaces - 
--log-level=info @@ -44,8 +62,8 @@ spec: - --storage-adv-addr=source-controller.$(RUNTIME_NAMESPACE).svc.cluster.local. livenessProbe: httpGet: - port: http - path: / + port: healthz + path: /healthz readinessProbe: httpGet: port: http diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 255404bc8..0118ce85b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,4 +6,4 @@ resources: images: - name: fluxcd/source-controller newName: fluxcd/source-controller - newTag: v0.7.0 + newTag: v1.7.0 diff --git a/config/rbac/externalartifact_editor_role.yaml b/config/rbac/externalartifact_editor_role.yaml new file mode 100644 index 000000000..ded6c1d93 --- /dev/null +++ b/config/rbac/externalartifact_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit externalartifacts. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifact-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/externalartifact_viewer_role.yaml b/config/rbac/externalartifact_viewer_role.yaml new file mode 100644 index 000000000..d0c1d507f --- /dev/null +++ b/config/rbac/externalartifact_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view externalartifacts. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifacts-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/ocirepository_editor_role.yaml b/config/rbac/ocirepository_editor_role.yaml new file mode 100644 index 000000000..e4defde09 --- /dev/null +++ b/config/rbac/ocirepository_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ocirepositories. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ocirepository-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - ocirepositories + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - ocirepositories/status + verbs: + - get diff --git a/config/rbac/ocirepository_viewer_role.yaml b/config/rbac/ocirepository_viewer_role.yaml new file mode 100644 index 000000000..f769ac5a9 --- /dev/null +++ b/config/rbac/ocirepository_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ocirepositories. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ocirepository-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - ocirepositories + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - ocirepositories/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8cf5c66a0..d2cd9e7cb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,9 +1,7 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -17,104 +15,25 @@ rules: - "" resources: - secrets + - serviceaccounts verbs: - get - list - watch - apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io + - "" resources: - - buckets/finalizers + - serviceaccounts/token verbs: - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/status - verbs: - - get - - patch - - update - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets - gitrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmcharts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/status - verbs: - - get - - patch - - update -- 
apiGroups: - - source.toolkit.fluxcd.io - resources: - helmrepositories + - ocirepositories verbs: - create - delete @@ -126,7 +45,11 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/finalizers + - gitrepositories/finalizers + - helmcharts/finalizers - helmrepositories/finalizers + - ocirepositories/finalizers verbs: - create - delete @@ -136,7 +59,11 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/status + - gitrepositories/status + - helmcharts/status - helmrepositories/status + - ocirepositories/status verbs: - get - patch diff --git a/config/samples/source_v1beta1_bucket.yaml b/config/samples/source_v1_bucket.yaml similarity index 81% rename from config/samples/source_v1beta1_bucket.yaml rename to config/samples/source_v1_bucket.yaml index e536d45c4..f09cbe213 100644 --- a/config/samples/source_v1beta1_bucket.yaml +++ b/config/samples/source_v1_bucket.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: bucket-sample diff --git a/config/samples/source_v1beta1_gitrepository.yaml b/config/samples/source_v1_gitrepository.yaml similarity index 77% rename from config/samples/source_v1beta1_gitrepository.yaml rename to config/samples/source_v1_gitrepository.yaml index 9719fd213..27fad9a25 100644 --- a/config/samples/source_v1beta1_gitrepository.yaml +++ b/config/samples/source_v1_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: gitrepository-sample diff --git a/config/samples/source_v1beta1_helmchart_gitrepository.yaml b/config/samples/source_v1_helmchart_gitrepository.yaml similarity index 78% rename from config/samples/source_v1beta1_helmchart_gitrepository.yaml rename to config/samples/source_v1_helmchart_gitrepository.yaml index 0a8db7c6f..680e7b184 100644 --- a/config/samples/source_v1beta1_helmchart_gitrepository.yaml +++ 
b/config/samples/source_v1_helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-git-sample diff --git a/config/samples/source_v1_helmchart_helmrepository-oci.yaml b/config/samples/source_v1_helmchart_helmrepository-oci.yaml new file mode 100644 index 000000000..d9dd3279d --- /dev/null +++ b/config/samples/source_v1_helmchart_helmrepository-oci.yaml @@ -0,0 +1,11 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: helmchart-sample-oci +spec: + chart: stefanprodan/charts/podinfo + version: '>=6.0.0 <7.0.0' + sourceRef: + kind: HelmRepository + name: helmrepository-sample-oci + interval: 1m diff --git a/config/samples/source_v1beta1_helmchart_helmrepository.yaml b/config/samples/source_v1_helmchart_helmrepository.yaml similarity index 63% rename from config/samples/source_v1beta1_helmchart_helmrepository.yaml rename to config/samples/source_v1_helmchart_helmrepository.yaml index 07cd3b5d0..d1b43fe3e 100644 --- a/config/samples/source_v1beta1_helmchart_helmrepository.yaml +++ b/config/samples/source_v1_helmchart_helmrepository.yaml @@ -1,11 +1,12 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-sample spec: chart: podinfo - version: '>=2.0.0 <3.0.0' + version: '6.x' sourceRef: kind: HelmRepository name: helmrepository-sample interval: 1m + ignoreMissingValuesFiles: true diff --git a/config/samples/source_v1_helmrepository-oci.yaml b/config/samples/source_v1_helmrepository-oci.yaml new file mode 100644 index 000000000..458dc73c2 --- /dev/null +++ b/config/samples/source_v1_helmrepository-oci.yaml @@ -0,0 +1,8 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: helmrepository-sample-oci +spec: + interval: 1m + type: oci + url: oci://ghcr.io/ diff --git a/config/samples/source_v1beta1_helmrepository.yaml 
b/config/samples/source_v1_helmrepository.yaml similarity index 73% rename from config/samples/source_v1beta1_helmrepository.yaml rename to config/samples/source_v1_helmrepository.yaml index 6a6e65f45..b7049cc0a 100644 --- a/config/samples/source_v1beta1_helmrepository.yaml +++ b/config/samples/source_v1_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: helmrepository-sample diff --git a/config/samples/source_v1_ocirepository.yaml b/config/samples/source_v1_ocirepository.yaml new file mode 100644 index 000000000..69fb19e2a --- /dev/null +++ b/config/samples/source_v1_ocirepository.yaml @@ -0,0 +1,9 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: ocirepository-sample +spec: + interval: 1m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: 6.1.6 diff --git a/config/testdata/bucket/source.yaml b/config/testdata/bucket/source.yaml index 459e7400a..bd3097ee2 100644 --- a/config/testdata/bucket/source.yaml +++ b/config/testdata/bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: podinfo diff --git a/config/testdata/git/large-repo.yaml b/config/testdata/git/large-repo.yaml new file mode 100644 index 000000000..ad3defd68 --- /dev/null +++ b/config/testdata/git/large-repo.yaml @@ -0,0 +1,10 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: large-repo +spec: + interval: 10m + timeout: 2m + url: https://github.com/nodejs/node.git + ref: + branch: main diff --git a/config/testdata/helmchart-from-bucket/source.yaml b/config/testdata/helmchart-from-bucket/source.yaml index 0609cf541..814305d13 100644 --- a/config/testdata/helmchart-from-bucket/source.yaml +++ b/config/testdata/helmchart-from-bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: 
source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: charts @@ -13,7 +13,7 @@ spec: secretRef: name: minio-credentials --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-bucket diff --git a/config/testdata/helmchart-from-oci/notation.yaml b/config/testdata/helmchart-from-oci/notation.yaml new file mode 100644 index 000000000..6434479ea --- /dev/null +++ b/config/testdata/helmchart-from-oci/notation.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo-notation +spec: + url: oci://ghcr.io/stefanprodan/charts + type: "oci" + interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo-notation +spec: + chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo-notation + version: '6.6.0' + interval: 1m + verify: + provider: notation + secretRef: + name: notation-config diff --git a/config/testdata/helmchart-from-oci/source.yaml b/config/testdata/helmchart-from-oci/source.yaml new file mode 100644 index 000000000..b2786531e --- /dev/null +++ b/config/testdata/helmchart-from-oci/source.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo +spec: + url: oci://ghcr.io/stefanprodan/charts + type: "oci" + interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo + version: '6.1.*' + interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo-keyless +spec: + chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo + version: '6.2.1' + interval: 1m + verify: + provider: cosign diff --git a/config/testdata/helmchart-valuesfile/gitrepository.yaml b/config/testdata/helmchart-valuesfile/gitrepository.yaml 
index b620c8560..279979e93 100644 --- a/config/testdata/helmchart-valuesfile/gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: podinfo diff --git a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml index 911132d84..3c26b3eb5 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-git @@ -8,4 +8,5 @@ spec: kind: GitRepository name: podinfo chart: charts/podinfo - valuesFile: charts/podinfo/values-prod.yaml + valuesFiles: + - charts/podinfo/values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml index 4674622b0..0b004eb7a 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -8,4 +8,5 @@ spec: kind: HelmRepository name: podinfo chart: podinfo - valuesFile: values-prod.yaml + valuesFiles: + - values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmrepository.yaml index ab568384c..f0c178695 100644 --- a/config/testdata/helmchart-valuesfile/helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo 
diff --git a/config/testdata/ocirepository/signed-with-key.yaml b/config/testdata/ocirepository/signed-with-key.yaml new file mode 100644 index 000000000..0a3a652ee --- /dev/null +++ b/config/testdata/ocirepository/signed-with-key.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-key +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/podinfo-deploy + ref: + semver: "6.2.x" + verify: + provider: cosign + secretRef: + name: cosign-key diff --git a/config/testdata/ocirepository/signed-with-keyless.yaml b/config/testdata/ocirepository/signed-with-keyless.yaml new file mode 100644 index 000000000..ff46ed30d --- /dev/null +++ b/config/testdata/ocirepository/signed-with-keyless.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-keyless +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + semver: "6.2.x" + verify: + provider: cosign diff --git a/config/testdata/ocirepository/signed-with-notation.yaml b/config/testdata/ocirepository/signed-with-notation.yaml new file mode 100644 index 000000000..55820f6d4 --- /dev/null +++ b/config/testdata/ocirepository/signed-with-notation.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-notation +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/podinfo-deploy + ref: + semver: "6.6.x" + verify: + provider: notation + secretRef: + name: notation-config diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go deleted file mode 100644 index 3d1fabead..000000000 --- a/controllers/bucket_controller.go +++ /dev/null @@ -1,434 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-logr/logr" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" - "github.com/fluxcd/pkg/runtime/predicates" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" -) - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch - -// 
BucketReconciler reconciles a Bucket object -type BucketReconciler struct { - client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder -} - -type BucketReconcilerOptions struct { - MaxConcurrentReconciles int -} - -func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) -} - -func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error { - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.Bucket{}). - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). - WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}). - Complete(r) -} - -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - start := time.Now() - log := ctrl.LoggerFrom(ctx) - - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. 
- if bucket.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) - } - 
r.recordReadiness(ctx, reconciledBucket) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Now().Sub(start).String(), - bucket.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil -} - -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - s3Client, err := r.auth(ctx, bucket) - if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err - } - - // create tmp dir - tempDir, err := ioutil.TempDir("", bucket.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tempDir) - - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) - defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - - if strings.HasSuffix(object.Key, "/") { - continue - } - - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, 
minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - } - - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil - } - - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, bucket.Spec.Ignore); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - 
return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil -} - -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } - - // Record deleted status - r.recordReadiness(ctx, bucket) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -func (r *BucketReconciler) auth(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, - } - - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") - } - - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") - } - - return minio.New(bucket.Spec.Endpoint, &opt) -} - 
-func (r *BucketReconciler) checksum(root string) (string, error) { - checksum := "" - err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := ioutil.ReadFile(path) - if err != nil { - return err - } - checksum += fmt.Sprintf("%x", sha1.Sum(data)) - return nil - }) - if err != nil { - return "", err - } - - return fmt.Sprintf("%x", sha1.Sum([]byte(checksum))), nil -} - -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. -func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true - } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true - } - return bucket, false -} - -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. 
-func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) - } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := logr.FromContext(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) - } -} - -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err - } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus - - return 
r.Status().Patch(ctx, &bucket, patch) -} diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go deleted file mode 100644 index ccad45e90..000000000 --- a/controllers/gitrepository_controller.go +++ /dev/null @@ -1,377 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "time" - - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" - "github.com/fluxcd/pkg/runtime/predicates" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/pkg/git" - "github.com/fluxcd/source-controller/pkg/git/common" -) - -// 
+kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// GitRepositoryReconciler reconciles a GitRepository object -type GitRepositoryReconciler struct { - client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder -} - -type GitRepositoryReconcilerOptions struct { - MaxConcurrentReconciles int -} - -func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) -} - -func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error { - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.GitRepository{}). - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). - WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}). 
- Complete(r) -} - -func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - start := time.Now() - log := logr.FromContext(ctx) - - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } - - // Return early if the object is suspended. - if repository.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, repository) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. 
- if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile repository by pulling the latest Git commit - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.GitRepositoryReadyMessage(reconciledRepository)) - } - r.recordReadiness(ctx, reconciledRepository) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Now().Sub(start).String(), - repository.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil -} - -func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sourcev1.GitRepository) (sourcev1.GitRepository, error) { - // create tmp dir for the Git clone - tmpGit, err := ioutil.TempDir("", repository.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tmpGit) - - // determine auth method - auth := &common.Auth{} - if 
repository.Spec.SecretRef != nil { - authStrategy, err := git.AuthSecretStrategyForURL(repository.Spec.URL, repository.Spec.GitImplementation) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } - - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - - var secret corev1.Secret - err = r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } - - auth, err = authStrategy.Method(secret) - if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } - } - - checkoutStrategy, err := git.CheckoutStrategyForRef(repository.Spec.Reference, repository.Spec.GitImplementation) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err - } - commit, revision, err := checkoutStrategy.Checkout(ctx, tmpGit, repository.Spec.URL, auth) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err - } - - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", commit.Hash())) - if apimeta.IsStatusConditionTrue(repository.Status.Conditions, meta.ReadyCondition) && repository.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != repository.GetArtifact().URL { - r.Storage.SetArtifactURL(repository.GetArtifact()) - repository.Status.URL = r.Storage.SetHostname(repository.Status.URL) - } - return repository, nil - } - - // verify PGP signature - if repository.Spec.Verification != nil { - publicKeySecret := types.NamespacedName{ - Namespace: 
repository.Namespace, - Name: repository.Spec.Verification.SecretRef.Name, - } - var secret corev1.Secret - if err := r.Client.Get(ctx, publicKeySecret, &secret); err != nil { - err = fmt.Errorf("PGP public keys secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err - } - - err := commit.Verify(secret) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err - } - } - - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tmpGit, repository.Spec.Ignore); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.GitRepositoryReady(repository, artifact, url, sourcev1.GitOperationSucceedReason, message), nil -} - -func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.GitRepository) (ctrl.Result, error) { - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, 
- fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } - - // Record deleted status - r.recordReadiness(ctx, repository) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -// resetStatus returns a modified v1beta1.GitRepository and a boolean indicating -// if the status field has been reset. -func (r *GitRepositoryReconciler) resetStatus(repository sourcev1.GitRepository) (sourcev1.GitRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.GitRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true - } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.GitRepositoryProgressing(repository), true - } - return repository, false -} - -// gc performs a garbage collection for the given v1beta1.GitRepository. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. 
-func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) - } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) { - log := logr.FromContext(ctx) - - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *GitRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.GitRepository) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !repository.DeletionTimestamp.IsZero()) - } -} - -func (r *GitRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.GitRepositoryStatus) error { - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, 
&repository); err != nil { - return err - } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return r.Status().Patch(ctx, &repository, patch) -} diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go deleted file mode 100644 index f5596ca94..000000000 --- a/controllers/gitrepository_controller_test.go +++ /dev/null @@ -1,328 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "net/url" - "os" - "path" - "strings" - "time" - - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/fluxcd/pkg/gittestserver" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" -) - -var _ = Describe("GitRepositoryReconciler", func() { - - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 1 - ) - - Context("GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "git-repository-test" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - }) - - AfterEach(func() { - os.RemoveAll(gitServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - type refTestCase struct { - reference *sourcev1.GitRepositoryRef - createRefs []string - - waitForReason string - - expectStatus metav1.ConditionStatus - expectMessage string - expectRevision string - - gitImplementation string - } - - DescribeTable("Git references tests", func(t refTestCase) { - err = gitServer.StartHTTP() - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - 
Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - gitrepo.Worktree() - - for _, ref := range t.createRefs { - hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit) - err = gitrepo.Storer.SetReference(hRef) - Expect(err).NotTo(HaveOccurred()) - } - - remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - t.reference.Commit = strings.Replace(t.reference.Commit, "", commit.String(), 1) - - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - created := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) - if t.expectRevision != "" { - Expect(got.Status.Artifact.Revision).To(Equal(t.expectRevision + "/" + commit.String())) - } - }, - 
Entry("branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("branch non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "invalid-branch"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("tag", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "some-tag"}, - createRefs: []string{"refs/tags/some-tag"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-tag", - }), - Entry("tag non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "invalid-tag"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("semver", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - createRefs: []string{"refs/tags/v1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "v1.0.0", - }), - Entry("semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/0.1.1", "refs/tags/0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "0.2.0", - }), - Entry("mixed semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/v0.1.1", "refs/tags/v0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "v0.2.0", - }), - Entry("semver invalid", refTestCase{ - 
reference: &sourcev1.GitRepositoryRef{SemVer: "1.2.3.4"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "semver parse range error: improper constraint: 1.2.3.4", - }), - Entry("semver no match", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "no match found for semver: 1.0.0", - }), - Entry("commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Commit: "", - }, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "master", - }), - Entry("commit in branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "some-branch", - Commit: "", - }, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("invalid commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "master", - Commit: "invalid", - }, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "git commit 'invalid' not found: object not found", - }), - ) - - DescribeTable("Git self signed cert tests", func(t refTestCase) { - err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - created := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - 
Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, - GitImplementation: t.gitImplementation, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) - }, - Entry("self signed v1", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "x509: certificate signed by unknown authority", - }), - Entry("self signed v2", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "error: user rejected certificate", - gitImplementation: sourcev1.LibGit2Implementation, - }), - ) - }) -}) diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go deleted file mode 100644 index 88eb50c51..000000000 --- a/controllers/helmchart_controller.go +++ /dev/null @@ -1,927 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/go-logr/logr" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/chartutil" - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" - "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/pkg/untar" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/internal/helm" - "github.com/fluxcd/source-controller/internal/util" -) - 
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// HelmChartReconciler reconciles a HelmChart object -type HelmChartReconciler struct { - client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters getter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder -} - -func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{}) -} - -func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { - if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, - r.indexHelmRepositoryByURL); err != nil { - return fmt.Errorf("failed setting index fields: %w", err) - } - if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmChart{}, sourcev1.SourceIndexKey, - r.indexHelmChartBySource); err != nil { - return fmt.Errorf("failed setting index fields: %w", err) - } - - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.HelmChart{}, builder.WithPredicates( - predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), - )). - Watches( - &source.Kind{Type: &sourcev1.HelmRepository{}}, - handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange), - builder.WithPredicates(SourceRevisionChangePredicate{}), - ). 
- Watches( - &source.Kind{Type: &sourcev1.GitRepository{}}, - handler.EnqueueRequestsFromMapFunc(r.requestsForGitRepositoryChange), - builder.WithPredicates(SourceRevisionChangePredicate{}), - ). - Watches( - &source.Kind{Type: &sourcev1.Bucket{}}, - handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange), - builder.WithPredicates(SourceRevisionChangePredicate{}), - ). - WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}). - Complete(r) -} - -func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - start := time.Now() - log := logr.FromContext(ctx) - - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return ctrl.Result{Requeue: true}, client.IgnoreNotFound(err) - } - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&chart, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &chart); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !chart.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, chart) - } - - // Return early if the object is suspended. 
- if chart.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // Record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // Conditionally set progressing condition in status - resetChart, changed := r.resetStatus(chart) - if changed { - chart = resetChart - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, chart) - } - - // Record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(chart.GetAnnotations()); ok { - chart.Status.SetLastHandledReconcileRequest(v) - } - - // Purge all but current artifact from storage - if err := r.gc(chart); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // Retrieve the source - source, err := r.getSource(ctx, chart) - if err != nil { - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - return ctrl.Result{Requeue: true}, err - } - - // Assert source is ready - if source.GetArtifact() == nil { - err = fmt.Errorf("no artifact found for source `%s` kind '%s'", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - r.recordReadiness(ctx, chart) - return ctrl.Result{Requeue: true}, err - } - - // Perform the 
reconciliation for the chart source type - var reconciledChart sourcev1.HelmChart - var reconcileErr error - switch typedSource := source.(type) { - case *sourcev1.HelmRepository: - reconciledChart, reconcileErr = r.reconcileFromHelmRepository(ctx, *typedSource, *chart.DeepCopy(), changed) - case *sourcev1.GitRepository, *sourcev1.Bucket: - reconciledChart, reconcileErr = r.reconcileFromTarballArtifact(ctx, *typedSource.GetArtifact(), - *chart.DeepCopy(), changed) - default: - err := fmt.Errorf("unable to reconcile unsupported source reference kind '%s'", chart.Spec.SourceRef.Kind) - return ctrl.Result{Requeue: false}, err - } - - // Update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledChart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // If reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledChart, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledChart) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // Emit an event if we did not have an artifact before, or the revision has changed - if (chart.GetArtifact() == nil && reconciledChart.GetArtifact() != nil) || - (chart.GetArtifact() != nil && reconciledChart.GetArtifact() != nil && reconciledChart.GetArtifact().Revision != chart.GetArtifact().Revision) { - r.event(ctx, reconciledChart, events.EventSeverityInfo, sourcev1.HelmChartReadyMessage(reconciledChart)) - } - r.recordReadiness(ctx, reconciledChart) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Now().Sub(start).String(), - chart.GetInterval().Duration.String(), - )) - return ctrl.Result{RequeueAfter: chart.GetInterval().Duration}, nil -} - -type HelmChartReconcilerOptions struct { - MaxConcurrentReconciles int -} - -func (r *HelmChartReconciler) getSource(ctx context.Context, chart sourcev1.HelmChart) 
(sourcev1.Source, error) { - var source sourcev1.Source - namespacedName := types.NamespacedName{ - Namespace: chart.GetNamespace(), - Name: chart.Spec.SourceRef.Name, - } - switch chart.Spec.SourceRef.Kind { - case sourcev1.HelmRepositoryKind: - var repository sourcev1.HelmRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &repository - case sourcev1.GitRepositoryKind: - var repository sourcev1.GitRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &repository - case sourcev1.BucketKind: - var bucket sourcev1.Bucket - err := r.Client.Get(ctx, namespacedName, &bucket) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &bucket - default: - return source, fmt.Errorf("source `%s` kind '%s' not supported", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) - } - return source, nil -} - -func (r *HelmChartReconciler) reconcileFromHelmRepository(ctx context.Context, - repository sourcev1.HelmRepository, chart sourcev1.HelmChart, force bool) (sourcev1.HelmChart, error) { - // TODO: move this to a validation webhook once the discussion around - // certificates has settled: https://github.com/fluxcd/image-reflector-controller/issues/69 - if err := validHelmChartName(chart.Spec.Chart); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), nil - } - - // Configure ChartRepository getter options - var clientOpts []getter.Option - if secret, err := r.getHelmRepositorySecret(ctx, &repository); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.AuthenticationFailedReason, err.Error()), err - } else if secret != nil { - opts, cleanup, err := helm.ClientOptionsFromSecret(*secret) - if err != nil { - err = fmt.Errorf("auth options 
error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.AuthenticationFailedReason, err.Error()), err - } - defer cleanup() - - clientOpts = opts - } - clientOpts = append(clientOpts, getter.WithTimeout(repository.Spec.Timeout.Duration)) - - // Initialize the chart repository and load the index file - chartRepo, err := helm.NewChartRepository(repository.Spec.URL, r.Getters, clientOpts) - if err != nil { - switch err.(type) { - case *url.Error: - return sourcev1.HelmChartNotReady(chart, sourcev1.URLInvalidReason, err.Error()), err - default: - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - } - indexFile, err := os.Open(r.Storage.LocalPath(*repository.GetArtifact())) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - b, err := ioutil.ReadAll(indexFile) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - if err = chartRepo.LoadIndex(b); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - - // Lookup the chart version in the chart repository index - chartVer, err := chartRepo.Get(chart.Spec.Chart, chart.Spec.Version) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - - // Return early if the revision is still the same as the current artifact - newArtifact := r.Storage.NewArtifactFor(chart.Kind, chart.GetObjectMeta(), chartVer.Version, - fmt.Sprintf("%s-%s.tgz", chartVer.Name, chartVer.Version)) - if !force && repository.GetArtifact().HasRevision(newArtifact.Revision) { - if newArtifact.URL != chart.GetArtifact().URL { - r.Storage.SetArtifactURL(chart.GetArtifact()) - chart.Status.URL = r.Storage.SetHostname(chart.Status.URL) - } - return chart, nil - } - - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { 
- err = fmt.Errorf("unable to create chart directory: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // Attempt to download the chart - res, err := chartRepo.DownloadChart(chartVer) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - - // Either repackage the chart with the declared default values file, - // or write the chart directly to storage. - var ( - readyReason = sourcev1.ChartPullSucceededReason - readyMessage = fmt.Sprintf("Fetched revision: %s", newArtifact.Revision) - ) - switch { - case chart.Spec.ValuesFile != "" && chart.Spec.ValuesFile != chartutil.ValuesfileName: - var ( - tmpDir string - pkgPath string - ) - // Load the chart - helmChart, err := loader.LoadArchive(res) - if err != nil { - err = fmt.Errorf("load chart error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Find override file and retrieve contents - var valuesData []byte - cfn := filepath.Clean(chart.Spec.ValuesFile) - for _, f := range helmChart.Files { - if f.Name == cfn { - valuesData = f.Data - break - } - } - - // Overwrite values file - if changed, err := helm.OverwriteChartDefaultValues(helmChart, valuesData); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPackageFailedReason, err.Error()), err - } else if !changed { - // No changes, skip to write original package to storage - goto skipToDefault - } - - // Create temporary working directory - tmpDir, err = ioutil.TempDir("", fmt.Sprintf("%s-%s-", chart.Namespace, chart.Name)) - if err != nil { - err = fmt.Errorf("tmp dir error: 
%w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tmpDir) - - // Package the chart with the new default values - pkgPath, err = chartutil.Save(helmChart, tmpDir) - if err != nil { - err = fmt.Errorf("chart package error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPackageFailedReason, err.Error()), err - } - - // Copy the packaged chart to the artifact path - if err := r.Storage.CopyFromPath(&newArtifact, pkgPath); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - readyMessage = fmt.Sprintf("Fetched and packaged revision: %s", newArtifact.Revision) - readyReason = sourcev1.ChartPackageSucceededReason - skipToDefault: - fallthrough - default: - // Write artifact to storage - if err := r.Storage.AtomicWriteFile(&newArtifact, res, 0644); err != nil { - err = fmt.Errorf("unable to write chart file: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - } - - // Update symlink - chartUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", chartVer.Name)) - if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - return sourcev1.HelmChartReady(chart, newArtifact, chartUrl, readyReason, readyMessage), nil -} - -func (r *HelmChartReconciler) reconcileFromTarballArtifact(ctx context.Context, - artifact sourcev1.Artifact, chart sourcev1.HelmChart, force bool) (sourcev1.HelmChart, error) { - // Create temporary working directory - tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s-", chart.Namespace, chart.Name)) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.HelmChartNotReady(chart, 
sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tmpDir) - - // Open the tarball artifact file and untar files into working directory - f, err := os.Open(r.Storage.LocalPath(artifact)) - if err != nil { - err = fmt.Errorf("artifact open error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - if _, err = untar.Untar(f, tmpDir); err != nil { - f.Close() - err = fmt.Errorf("artifact untar error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - f.Close() - - // Load the chart - chartPath, err := securejoin.SecureJoin(tmpDir, chart.Spec.Chart) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - chartFileInfo, err := os.Stat(chartPath) - if err != nil { - err = fmt.Errorf("chart location read error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - helmChart, err := loader.Load(chartPath) - if err != nil { - err = fmt.Errorf("load chart error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Return early if the revision is still the same as the current chart artifact - newArtifact := r.Storage.NewArtifactFor(chart.Kind, chart.ObjectMeta.GetObjectMeta(), helmChart.Metadata.Version, - fmt.Sprintf("%s-%s.tgz", helmChart.Metadata.Name, helmChart.Metadata.Version)) - if !force && apimeta.IsStatusConditionTrue(chart.Status.Conditions, meta.ReadyCondition) && chart.GetArtifact().HasRevision(newArtifact.Revision) { - if newArtifact.URL != artifact.URL { - r.Storage.SetArtifactURL(chart.GetArtifact()) - chart.Status.URL = r.Storage.SetHostname(chart.Status.URL) - } - return chart, nil - } - - // Either (re)package the chart with the declared default values file, - // or write the chart directly to storage. 
- pkgPath := chartPath - isValuesFileOverriden := false - if chart.Spec.ValuesFile != "" { - srcPath, err := securejoin.SecureJoin(tmpDir, chart.Spec.ValuesFile) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - if f, err := os.Stat(srcPath); os.IsNotExist(err) || !f.Mode().IsRegular() { - err = fmt.Errorf("invalid values file path: %s", chart.Spec.ValuesFile) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - valuesData, err := ioutil.ReadFile(srcPath) - if err != nil { - err = fmt.Errorf("failed to read from values file '%s': %w", chart.Spec.ValuesFile, err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - isValuesFileOverriden, err = helm.OverwriteChartDefaultValues(helmChart, valuesData) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPackageFailedReason, err.Error()), err - } - } - - isDir := chartFileInfo.IsDir() - switch { - case isDir: - // Determine chart dependencies - deps := helmChart.Dependencies() - reqs := helmChart.Metadata.Dependencies - lock := helmChart.Lock - if lock != nil { - // Load from lockfile if exists - reqs = lock.Dependencies - } - var dwr []*helm.DependencyWithRepository - for _, dep := range reqs { - // Exclude existing dependencies - for _, existing := range deps { - if existing.Name() == dep.Name { - continue - } - } - - // Continue loop if file scheme detected - if strings.HasPrefix(dep.Repository, "file://") { - dwr = append(dwr, &helm.DependencyWithRepository{ - Dependency: dep, - Repository: nil, - }) - continue - } - - // Discover existing HelmRepository by URL - repository, err := r.resolveDependencyRepository(ctx, dep, chart.Namespace) - if err != nil { - repository = &sourcev1.HelmRepository{ - Spec: sourcev1.HelmRepositorySpec{ - URL: dep.Repository, - }, - } - } - - // Configure ChartRepository getter options - 
var clientOpts []getter.Option - if secret, err := r.getHelmRepositorySecret(ctx, repository); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.AuthenticationFailedReason, err.Error()), err - } else if secret != nil { - opts, cleanup, err := helm.ClientOptionsFromSecret(*secret) - if err != nil { - err = fmt.Errorf("auth options error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.AuthenticationFailedReason, err.Error()), err - } - defer cleanup() - - clientOpts = opts - } - - // Initialize the chart repository and load the index file - chartRepo, err := helm.NewChartRepository(repository.Spec.URL, r.Getters, clientOpts) - if err != nil { - switch err.(type) { - case *url.Error: - return sourcev1.HelmChartNotReady(chart, sourcev1.URLInvalidReason, err.Error()), err - default: - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - } - if repository.Status.Artifact != nil { - indexFile, err := os.Open(r.Storage.LocalPath(*repository.GetArtifact())) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - b, err := ioutil.ReadAll(indexFile) - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - if err = chartRepo.LoadIndex(b); err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - } else { - // Download index - err = chartRepo.DownloadIndex() - if err != nil { - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPullFailedReason, err.Error()), err - } - } - - dwr = append(dwr, &helm.DependencyWithRepository{ - Dependency: dep, - Repository: chartRepo, - }) - } - - // Construct dependencies for chart if any - if len(dwr) > 0 { - dm := &helm.DependencyManager{ - WorkingDir: tmpDir, - ChartPath: chart.Spec.Chart, - Chart: helmChart, - Dependencies: dwr, - } - err = dm.Build(ctx) - if err != nil { - return 
sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - } - - fallthrough - case isValuesFileOverriden: - pkgPath, err = chartutil.Save(helmChart, tmpDir) - if err != nil { - err = fmt.Errorf("chart package error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.ChartPackageFailedReason, err.Error()), err - } - } - - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { - err = fmt.Errorf("unable to create artifact directory: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // Copy the packaged chart to the artifact path - if err := r.Storage.CopyFromPath(&newArtifact, pkgPath); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Update symlink - cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", helmChart.Metadata.Name)) - if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(chart, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - message := fmt.Sprintf("Fetched and packaged revision: %s", newArtifact.Revision) - return sourcev1.HelmChartReady(chart, newArtifact, cUrl, sourcev1.ChartPackageSucceededReason, message), nil -} - -func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, chart sourcev1.HelmChart) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(chart); err != nil { - r.event(ctx, chart, events.EventSeverityError, - 
fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } - - // Record deleted status - r.recordReadiness(ctx, chart) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &chart); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -// resetStatus returns a modified v1beta1.HelmChart and a boolean indicating -// if the status field has been reset. -func (r *HelmChartReconciler) resetStatus(chart sourcev1.HelmChart) (sourcev1.HelmChart, bool) { - // We do not have an artifact, or it does no longer exist - if chart.GetArtifact() == nil || !r.Storage.ArtifactExist(*chart.GetArtifact()) { - chart = sourcev1.HelmChartProgressing(chart) - chart.Status.Artifact = nil - return chart, true - } - // The chart specification has changed - if chart.Generation != chart.Status.ObservedGeneration { - return sourcev1.HelmChartProgressing(chart), true - } - return chart, false -} - -// gc performs a garbage collection for the given v1beta1.HelmChart. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { - if !chart.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(chart.Kind, chart.GetObjectMeta(), "", "*")) - } - if chart.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*chart.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification -// controller if configured. 
-func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - log := logr.FromContext(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *HelmChartReconciler) recordReadiness(ctx context.Context, chart sourcev1.HelmChart) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !chart.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !chart.DeletionTimestamp.IsZero()) - } -} - -func (r *HelmChartReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmChartStatus) error { - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return err - } - - patch := client.MergeFrom(chart.DeepCopy()) - chart.Status = newStatus - - return r.Status().Patch(ctx, &chart, patch) -} - -func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { - repo, ok := o.(*sourcev1.HelmRepository) - if !ok { - panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) - } - u := helm.NormalizeChartRepositoryURL(repo.Spec.URL) - if u != "" { - return []string{u} - } - return nil -} - -func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) 
[]string { - hc, ok := o.(*sourcev1.HelmChart) - if !ok { - panic(fmt.Sprintf("Expected a HelmChart, got %T", o)) - } - return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)} -} - -func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, dep *helmchart.Dependency, namespace string) (*sourcev1.HelmRepository, error) { - u := helm.NormalizeChartRepositoryURL(dep.Repository) - if u == "" { - return nil, fmt.Errorf("invalid repository URL") - } - - listOpts := []client.ListOption{ - client.InNamespace(namespace), - client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: u}, - } - var list sourcev1.HelmRepositoryList - err := r.Client.List(ctx, &list, listOpts...) - if err != nil { - return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) - } - if len(list.Items) > 0 { - return &list.Items[0], nil - } - - return nil, fmt.Errorf("no HelmRepository found") -} - -func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { - if repository.Spec.SecretRef != nil { - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return nil, err - } - return &secret, nil - } - - return nil, nil -} - -func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request { - repo, ok := o.(*sourcev1.HelmRepository) - if !ok { - panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) - } - // If we do not have an artifact, we have no requests to make - if repo.GetArtifact() == nil { - return nil - } - - ctx := context.Background() - var list sourcev1.HelmChartList - if err := r.List(ctx, &list, client.MatchingFields{ - sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name), - }); err 
!= nil { - return nil - } - - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. - var reqs []reconcile.Request - for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: util.ObjectKey(&i)}) - } - return reqs -} - -func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) []reconcile.Request { - repo, ok := o.(*sourcev1.GitRepository) - if !ok { - panic(fmt.Sprintf("Expected a GitRepository, got %T", o)) - } - - // If we do not have an artifact, we have no requests to make - if repo.GetArtifact() == nil { - return nil - } - - var list sourcev1.HelmChartList - if err := r.List(context.TODO(), &list, client.MatchingFields{ - sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name), - }); err != nil { - return nil - } - - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. - var reqs []reconcile.Request - for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: util.ObjectKey(&i)}) - } - return reqs -} - -func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconcile.Request { - bucket, ok := o.(*sourcev1.Bucket) - if !ok { - panic(fmt.Sprintf("Expected a Bucket, got %T", o)) - } - - // If we do not have an artifact, we have no requests to make - if bucket.GetArtifact() == nil { - return nil - } - - var list sourcev1.HelmChartList - if err := r.List(context.TODO(), &list, client.MatchingFields{ - sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name), - }); err != nil { - return nil - } - - // TODO(hidde): unlike other places (e.g. 
the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. - var reqs []reconcile.Request - for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: util.ObjectKey(&i)}) - } - return reqs -} - -// validHelmChartName returns an error if the given string is not a -// valid Helm chart name; a valid name must be lower case letters -// and numbers, words may be separated with dashes (-). -// Ref: https://helm.sh/docs/chart_best_practices/conventions/#chart-names -func validHelmChartName(s string) error { - chartFmt := regexp.MustCompile("^([-a-z0-9]*)$") - if !chartFmt.MatchString(s) { - return fmt.Errorf("invalid chart name %q, a valid name must be lower case letters and numbers and MAY be seperated with dashes (-)", s) - } - return nil -} diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go deleted file mode 100644 index 0c39e482d..000000000 --- a/controllers/helmchart_controller_test.go +++ /dev/null @@ -1,1014 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/chartutil" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/yaml" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" -) - -var _ = Describe("HelmChartReconciler", func() { - - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - pullInterval = time.Second * 3 - ) - - Context("HelmChart from HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-chart-test-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) - - AfterEach(func() { - os.RemoveAll(helmServer.Root()) - helmServer.Stop() - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - 
It("Creates artifacts for", func() { - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - Expect(k8sClient.Create(context.Background(), &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - })).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Packaging a new chart version and regenerating the index") - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.2.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - By("Expecting new artifact revision and GC") - Eventually(func() bool { - now := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - 
!storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing HelmRepository error") - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.SourceRef.Name = "invalid" - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason && - strings.Contains(c.Message, "failed to retrieve source") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) - - By("Expecting to delete successfully") - got = &sourcev1.HelmChart{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - c := &sourcev1.HelmChart{} - return k8sClient.Get(context.Background(), key, c) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil - } - - By("Expecting GC on delete") - Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) - }) - - It("Filters versions", func() { - versions := []string{"0.1.0", "0.1.1", "0.2.0", "0.3.0-rc.1", "1.0.0-alpha.1", "1.0.0"} - for k := range versions { - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), versions[k])).Should(Succeed()) - } - - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - repository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: 1 * time.Hour}, - }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: 1 * time.Hour}, - }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("1.0.0")) - - chart.Spec.Version = "<0.2.0" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("0.1.1")) - - chart.Spec.Version = "invalid" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, chart) - for _, c := range chart.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(chart.GetArtifact()).NotTo(BeNil()) - Expect(chart.Status.Artifact.Revision).Should(Equal("0.1.1")) - }) - - It("Authenticates 
when credentials are provided", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() - - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.1.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - Data: map[string][]byte{ - "username": []byte(username), - "password": []byte(password), - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Creating repository and waiting for artifact") - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - repository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), repositoryKey, repository) - return repository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - By("Deleting secret before applying HelmChart") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - - By("Applying 
HelmChart") - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting missing secret error") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason && - strings.Contains(c.Message, "auth secret error") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Applying secret with missing keys") - secret.ResourceVersion = "" - secret.Data["username"] = []byte{} - secret.Data["password"] = []byte{} - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason && - strings.Contains(c.Message, "401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Adding username key") - secret.Data["username"] = []byte(username) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - - By("Expecting missing field error") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, 
timeout, interval).Should(BeTrue()) - - By("Adding password key") - secret.Data["password"] = []byte(password) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - - By("Expecting artifact") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return apimeta.IsStatusConditionTrue(got.Status.Conditions, meta.ReadyCondition) - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ToNot(BeNil()) - }) - }) - - Context("HelmChart from GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "test-git-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - }) - - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - switch { - case fi.Mode().IsDir(): - 
return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } - - b, err := ioutil.ReadFile(p) - if err != nil { - return err - } - - ff, err := fs.Create(p) - if err != nil { - return err - } - if _, err := ff.Write(b); err != nil { - return err - } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - _, err = wt.Commit("Helm charts", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - repository := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - 
storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Committing a new version in the chart metadata") - f, err := fs.OpenFile(fs.Join(chartDir, "helmchartwithdeps", chartutil.ChartfileName), os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) - - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Version = "0.2.0" - b, err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.Write(b) - Expect(err).NotTo(HaveOccurred()) - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Chart version bump", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Expecting new artifact revision and GC") - Eventually(func() bool { - now := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - }) - - It("Creates artifacts with .tgz file", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchart" - helmChart, err := 
loader.LoadDir(chartDir) - Expect(err).NotTo(HaveOccurred()) - - chartPackagePath, err := ioutil.TempDir("", fmt.Sprintf("chartpackage-%s-%s", helmChart.Name(), randStringRunes(5))) - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(chartPackagePath) - - pkg, err := chartutil.Save(helmChart, chartPackagePath) - Expect(err).NotTo(HaveOccurred()) - - b, err := ioutil.ReadFile(pkg) - Expect(err).NotTo(HaveOccurred()) - - tgz := filepath.Base(pkg) - ff, err := fs.Create(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) - - ff.Close() - _, err = wt.Add(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Helm chart", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - repository := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: tgz, - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), 
chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - chart.Spec.ValuesFile = "./charts/helmchart/override.yaml" - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - chart.Spec.ValuesFile = "invalid.yaml" - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && got.Status.Artifact.Revision == updated.Status.Artifact.Revision - }, timeout, interval).Should(BeTrue()) - }) - }) - }) - - Context("HelmChart from GitRepository with HelmRepository dependency", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "test-git-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - gitServer, err = gittestserver.NewTempGitServer() - 
Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) - - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) - - os.RemoveAll(helmServer.Root()) - helmServer.Stop() - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - Data: map[string][]byte{ - "username": []byte(username), - "password": []byte(password), - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Creating repository and waiting for artifact") - helmRepositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - helmRepository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: helmRepositoryKey.Name, - Namespace: helmRepositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, 
- }, - } - Expect(k8sClient.Create(context.Background(), helmRepository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), helmRepository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), helmRepositoryKey, helmRepository) - return helmRepository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchartwithdeps" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - switch { - case fi.Mode().IsDir(): - return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } - - b, err := ioutil.ReadFile(p) - if err != nil { - return err - } - - ff, err := fs.Create(p) - if err != nil { - return err - } - if _, err := ff.Write(b); err != nil { - return err - } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - By("Configuring the chart dependency") - filePath := fs.Join(chartDir, chartutil.ChartfileName) - f, err := fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) - - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Dependencies = []*helmchart.Dependency{ - { - Name: "helmchart", - Version: ">=0.1.0", - Repository: helmRepository.Spec.URL, - }, - } - - b, 
err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - ff, err := fs.Create(filePath) - Expect(err).NotTo(HaveOccurred()) - - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) - - err = ff.Close() - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Helm charts", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - repository := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - 
When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - chart.Spec.ValuesFile = "override.yaml" - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - chart.Spec.ValuesFile = "./charts/helmchart/override.yaml" - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && got.Status.Artifact.Revision == updated.Status.Artifact.Revision - }, timeout, interval).Should(BeTrue()) - }) - }) - }) -}) - -func Test_validHelmChartName(t *testing.T) { - tests := []struct { - name string - chart string - expectErr bool - }{ - {"valid", "drupal", false}, - {"valid dash", "nginx-lego", false}, - {"valid dashes", "aws-cluster-autoscaler", false}, - {"valid alphanum", "ng1nx-leg0", false}, - {"invalid slash", "artifactory/invalid", true}, - {"invalid dot", "in.valid", true}, - {"invalid uppercase", "inValid", true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := validHelmChartName(tt.chart); (err != nil) != tt.expectErr { - t.Errorf("validHelmChartName() error = %v, expectErr %v", err, tt.expectErr) - } - }) - } -} diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go deleted file mode 100644 index d19a14842..000000000 --- a/controllers/helmrepository_controller.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Copyright 2020 The 
Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "bytes" - "context" - "fmt" - "net/url" - "time" - - "github.com/go-logr/logr" - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/yaml" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" - "github.com/fluxcd/pkg/runtime/predicates" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/internal/helm" -) - -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch -// 
+kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete -// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// HelmRepositoryReconciler reconciles a HelmRepository object -type HelmRepositoryReconciler struct { - client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters getter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder -} - -type HelmRepositoryReconcilerOptions struct { - MaxConcurrentReconciles int -} - -func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { - return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) -} - -func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error { - return ctrl.NewControllerManagedBy(mgr). - For(&sourcev1.HelmRepository{}). - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). - WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}). 
- Complete(r) -} - -func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - start := time.Now() - log := logr.FromContext(ctx) - - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } - - // Return early if the object is suspended. - if repository.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") - return ctrl.Result{}, nil - } - - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, repository) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. 
- if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile repository by downloading the index.yaml file - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.HelmRepositoryReadyMessage(reconciledRepository)) - } - r.recordReadiness(ctx, reconciledRepository) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Now().Sub(start).String(), - repository.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil -} - -func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, repository sourcev1.HelmRepository) (sourcev1.HelmRepository, error) { - var clientOpts []getter.Option - if repository.Spec.SecretRef != nil { - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return 
sourcev1.HelmRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } - - opts, cleanup, err := helm.ClientOptionsFromSecret(secret) - if err != nil { - err = fmt.Errorf("auth options error: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } - defer cleanup() - clientOpts = opts - } - clientOpts = append(clientOpts, getter.WithTimeout(repository.Spec.Timeout.Duration)) - - chartRepo, err := helm.NewChartRepository(repository.Spec.URL, r.Getters, clientOpts) - if err != nil { - switch err.(type) { - case *url.Error: - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.URLInvalidReason, err.Error()), err - default: - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.IndexationFailedReason, err.Error()), err - } - } - if err := chartRepo.DownloadIndex(); err != nil { - err = fmt.Errorf("failed to download repository index: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.IndexationFailedReason, err.Error()), err - } - - // return early on unchanged generation - artifact := r.Storage.NewArtifactFor(repository.Kind, - repository.ObjectMeta.GetObjectMeta(), - chartRepo.Index.Generated.Format(time.RFC3339Nano), - fmt.Sprintf("index-%s.yaml", url.PathEscape(chartRepo.Index.Generated.Format(time.RFC3339Nano)))) - if apimeta.IsStatusConditionTrue(repository.Status.Conditions, meta.ReadyCondition) && repository.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != repository.GetArtifact().URL { - r.Storage.SetArtifactURL(repository.GetArtifact()) - repository.Status.URL = r.Storage.SetHostname(repository.Status.URL) - } - return repository, nil - } - - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("unable to create repository index directory: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - 
// acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // save artifact to storage - b, err := yaml.Marshal(&chartRepo.Index) - if err != nil { - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.IndexationFailedReason, err.Error()), err - } - if err := r.Storage.AtomicWriteFile(&artifact, bytes.NewReader(b), 0644); err != nil { - err = fmt.Errorf("unable to write repository index file: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // update index symlink - indexURL, err := r.Storage.Symlink(artifact, "index.yaml") - if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.HelmRepositoryReady(repository, artifact, indexURL, sourcev1.IndexationSucceededReason, message), nil -} - -func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.HelmRepository) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err - } - - // Record deleted status - r.recordReadiness(ctx, repository) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is 
being deleted - return ctrl.Result{}, nil -} - -// resetStatus returns a modified v1beta1.HelmRepository and a boolean indicating -// if the status field has been reset. -func (r *HelmRepositoryReconciler) resetStatus(repository sourcev1.HelmRepository) (sourcev1.HelmRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.HelmRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true - } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.HelmRepositoryProgressing(repository), true - } - return repository, false -} - -// gc performs a garbage collection for the given v1beta1.HelmRepository. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) - } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - log := logr.FromContext(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to 
send event") - return - } - } -} - -func (r *HelmRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.HelmRepository) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !repository.DeletionTimestamp.IsZero()) - } -} - -func (r *HelmRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmRepositoryStatus) error { - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return err - } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return r.Status().Patch(ctx, &repository, patch) -} diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go deleted file mode 100644 index 126ed11c5..000000000 --- a/controllers/helmrepository_controller_test.go +++ /dev/null @@ -1,411 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "net/http" - "os" - "path" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/helmtestserver" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" -) - -var _ = Describe("HelmRepositoryReconciler", func() { - - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - repositoryTimeout = time.Second * 5 - ) - - Context("HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - }) - - AfterEach(func() { - os.RemoveAll(helmServer.Root()) - helmServer.Stop() - - Eventually(func() error { - return k8sClient.Delete(context.Background(), namespace) - }, timeout, interval).Should(Succeed(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: 
indexInterval}, - Timeout: &metav1.Duration{Duration: repositoryTimeout}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - - By("Expecting artifact") - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Updating the chart index") - // Regenerating the index is sufficient to make the revision change - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - By("Expecting revision change and GC") - Eventually(func() bool { - now := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.URL = "invalid#url?" 
- Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) - - By("Expecting to delete successfully") - got = &sourcev1.HelmRepository{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - r := &sourcev1.HelmRepository{} - return k8sClient.Get(context.Background(), key, r) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil - } - - By("Expecting GC after delete") - Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) - }) - - It("Handles timeout", func() { - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting index download to succeed") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := 
range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationSucceededReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting index download to timeout") - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.Timeout = &metav1.Duration{Duration: time.Microsecond} - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() string { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationFailedReason { - return condition.Message - } - } - return "" - }, timeout, interval).Should(MatchRegexp("(?i)timeout")) - }) - - It("Authenticates when basic auth credentials are provided", func() { - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).NotTo(HaveOccurred()) - - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - defer os.RemoveAll(helmServer.Root()) - defer helmServer.Stop() - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - 
created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, "401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "username": []byte(username), - } - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting artifact") - secret.Data["password"] = []byte(password) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing secret error") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == 
sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) - }) - - It("Authenticates when TLS credentials are provided", func() { - err = helmServer.StartTLS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - Expect(err).NotTo(HaveOccurred()) - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - By("Expecting unknown authority error") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, "certificate signed by unknown authority") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "certFile": examplePublicKey, - } - Expect(k8sClient.Update(context.Background(), 
secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting artifact") - secret.Data["keyFile"] = examplePrivateKey - secret.Data["caFile"] = exampleCA - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing secret error") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) - }) - }) -}) diff --git a/controllers/storage.go b/controllers/storage.go deleted file mode 100644 index 3e0d4412a..000000000 --- a/controllers/storage.go +++ /dev/null @@ -1,448 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/gzip" - "crypto/sha1" - "fmt" - "hash" - "io" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/fluxcd/pkg/lockedfile" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/internal/fs" -) - -const ( - excludeFile = ".sourceignore" - excludeVCS = ".git/,.gitignore,.gitmodules,.gitattributes" - excludeExt = "*.jpg,*.jpeg,*.gif,*.png,*.wmv,*.flv,*.tar.gz,*.zip" -) - -// Storage manages artifacts -type Storage struct { - // BasePath is the local directory path where the source artifacts are stored. - BasePath string `json:"basePath"` - - // Hostname is the file server host name used to compose the artifacts URIs. - Hostname string `json:"hostname"` - - // Timeout for artifacts operations - Timeout time.Duration `json:"timeout"` -} - -// NewStorage creates the storage helper for a given path and hostname -func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { - if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { - return nil, fmt.Errorf("invalid dir path: %s", basePath) - } - return &Storage{ - BasePath: basePath, - Hostname: hostname, - Timeout: timeout, - }, nil -} - -// NewArtifactFor returns a new v1beta1.Artifact. -func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact { - path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName) - artifact := sourcev1.Artifact{ - Path: path, - Revision: revision, - } - s.SetArtifactURL(&artifact) - return artifact -} - -// SetArtifactURL sets the URL on the given v1beta1.Artifact. 
-func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { - if artifact.Path == "" { - return - } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) -} - -// SetHostname sets the hostname of the given URL string to the current Storage.Hostname -// and returns the result. -func (s Storage) SetHostname(URL string) string { - u, err := url.Parse(URL) - if err != nil { - return "" - } - u.Host = s.Hostname - return u.String() -} - -// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir. -func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error { - dir := filepath.Dir(s.LocalPath(artifact)) - return os.MkdirAll(dir, 0777) -} - -// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir. -func (s *Storage) RemoveAll(artifact sourcev1.Artifact) error { - dir := filepath.Dir(s.LocalPath(artifact)) - return os.RemoveAll(dir) -} - -// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, -// excluding the current one. -func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - var errors []string - _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - - if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { - if err := os.Remove(path); err != nil { - errors = append(errors, info.Name()) - } - } - return nil - }) - - if len(errors) > 0 { - return fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) - } - return nil -} - -// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage -// and is a regular file. 
-func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool { - fi, err := os.Lstat(s.LocalPath(artifact)) - if err != nil { - return false - } - return fi.Mode().IsRegular() -} - -// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact -// path, excluding any VCS specific files and directories, or any of the excludes defined in -// the excludeFiles. If successful, it sets the checksum and last update time on the artifact. -func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, ignore *string) (err error) { - if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() { - return fmt.Errorf("invalid dir path: %s", dir) - } - - ps, err := loadExcludePatterns(dir, ignore) - if err != nil { - return err - } - matcher := gitignore.NewMatcher(ps) - - localPath := s.LocalPath(*artifact) - tf, err := ioutil.TempFile(filepath.Split(localPath)) - if err != nil { - return err - } - tmpName := tf.Name() - defer func() { - if err != nil { - os.Remove(tmpName) - } - }() - - h := newHash() - mw := io.MultiWriter(h, tf) - - gw := gzip.NewWriter(mw) - tw := tar.NewWriter(gw) - if err := writeToArchiveExcludeMatches(dir, matcher, tw); err != nil { - tw.Close() - gw.Close() - tf.Close() - return err - } - - if err := tw.Close(); err != nil { - gw.Close() - tf.Close() - return err - } - if err := gw.Close(); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tmpName, 0644); err != nil { - return err - } - - if err := fs.RenameWithFallback(tmpName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - return nil -} - -// writeToArchiveExcludeMatches walks over the given dir and writes any regular file that does -// not match the given gitignore.Matcher. 
-func writeToArchiveExcludeMatches(dir string, matcher gitignore.Matcher, writer *tar.Writer) error { - fn := func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - // Ignore anything that is not a file (directories, symlinks) - if !fi.Mode().IsRegular() { - return nil - } - - // Ignore excluded extensions and files - if matcher.Match(strings.Split(p, "/"), false) { - return nil - } - - header, err := tar.FileInfoHeader(fi, p) - if err != nil { - return err - } - // The name needs to be modified to maintain directory structure - // as tar.FileInfoHeader only has access to the base name of the file. - // Ref: https://golang.org/src/archive/tar/common.go?#L626 - relFilePath := p - if filepath.IsAbs(dir) { - relFilePath, err = filepath.Rel(dir, p) - if err != nil { - return err - } - } - header.Name = relFilePath - - if err := writer.WriteHeader(header); err != nil { - return err - } - - f, err := os.Open(p) - if err != nil { - f.Close() - return err - } - if _, err := io.Copy(writer, f); err != nil { - f.Close() - return err - } - return f.Close() - } - return filepath.Walk(dir, fn) -} - -// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path. -// If successful, it sets the checksum and last update time on the artifact. 
-func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := ioutil.TempFile(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - h := newHash() - mw := io.MultiWriter(h, tf) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tfName, mode); err != nil { - return err - } - - if err := fs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - return nil -} - -// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path. -// If successful, it sets the checksum and last update time on the artifact. -func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := ioutil.TempFile(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - h := newHash() - mw := io.MultiWriter(h, tf) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := fs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil)) - artifact.LastUpdateTime = metav1.Now() - return nil -} - -// CopyFromPath atomically copies the contents of the given path to the path of -// the v1beta1.Artifact. If successful, the checksum and last update time on the -// artifact is set. 
-func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) { - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - return s.Copy(artifact, f) -} - -// Symlink creates or updates a symbolic link for the given v1beta1.Artifact -// and returns the URL for the symlink. -func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - link := filepath.Join(dir, linkName) - tmpLink := link + ".tmp" - - if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) { - return "", err - } - - if err := os.Symlink(localPath, tmpLink); err != nil { - return "", err - } - - if err := os.Rename(tmpLink, link); err != nil { - return "", err - } - - url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)) - return url, nil -} - -// Checksum returns the SHA1 checksum for the data of the given io.Reader as a string. -func (s *Storage) Checksum(reader io.Reader) string { - h := newHash() - _, _ = io.Copy(h, reader) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// Lock creates a file lock for the given v1beta1.Artifact. -func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) { - lockFile := s.LocalPath(artifact) + ".lock" - mutex := lockedfile.MutexAt(lockFile) - return mutex.Lock() -} - -// LocalPath returns the local path of the given artifact (that is: relative to -// the Storage.BasePath). -func (s *Storage) LocalPath(artifact sourcev1.Artifact) string { - if artifact.Path == "" { - return "" - } - return filepath.Join(s.BasePath, artifact.Path) -} - -// getPatterns collects ignore patterns from the given reader and returns them -// as a gitignore.Pattern slice. 
-func getPatterns(reader io.Reader, path []string) []gitignore.Pattern { - var ps []gitignore.Pattern - scanner := bufio.NewScanner(reader) - - for scanner.Scan() { - s := scanner.Text() - if !strings.HasPrefix(s, "#") && len(strings.TrimSpace(s)) > 0 { - ps = append(ps, gitignore.ParsePattern(s, path)) - } - } - - return ps -} - -// loadExcludePatterns loads the excluded patterns from sourceignore or other -// sources. -func loadExcludePatterns(dir string, ignore *string) ([]gitignore.Pattern, error) { - path := strings.Split(dir, "/") - - var ps []gitignore.Pattern - for _, p := range strings.Split(excludeVCS, ",") { - ps = append(ps, gitignore.ParsePattern(p, path)) - } - - if ignore == nil { - for _, p := range strings.Split(excludeExt, ",") { - ps = append(ps, gitignore.ParsePattern(p, path)) - } - - if f, err := os.Open(filepath.Join(dir, excludeFile)); err == nil { - defer f.Close() - ps = append(ps, getPatterns(f, path)...) - } else if !os.IsNotExist(err) { - return nil, err - } - } else { - ps = append(ps, getPatterns(bytes.NewBufferString(*ignore), path)...) - } - - return ps, nil -} - -// newHash returns a new SHA1 hash. -func newHash() hash.Hash { - return sha1.New() -} diff --git a/controllers/storage_test.go b/controllers/storage_test.go deleted file mode 100644 index fe1d8bc34..000000000 --- a/controllers/storage_test.go +++ /dev/null @@ -1,277 +0,0 @@ -package controllers - -import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "testing" - "time" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" -) - -type ignoreMap map[string]bool - -var remoteRepository = "https://github.com/fluxcd/source-controller" - -func init() { - // if this remote repo ever gets in your way, this is an escape; just set - // this to the url you want to clone. Be the source you want to be. 
- s := os.Getenv("REMOTE_REPOSITORY") - if s != "" { - remoteRepository = s - } -} - -func createStoragePath() (string, error) { - return ioutil.TempDir("", "") -} - -func cleanupStoragePath(dir string) func() { - return func() { os.RemoveAll(dir) } -} - -func TestStorageConstructor(t *testing.T) { - dir, err := createStoragePath() - if err != nil { - t.Fatal(err) - } - t.Cleanup(cleanupStoragePath(dir)) - - if _, err := NewStorage("/nonexistent", "hostname", time.Minute); err == nil { - t.Fatal("nonexistent path was allowable in storage constructor") - } - - f, err := ioutil.TempFile(dir, "") - if err != nil { - t.Fatalf("while creating temporary file: %v", err) - } - f.Close() - - if _, err := NewStorage(f.Name(), "hostname", time.Minute); err == nil { - os.Remove(f.Name()) - t.Fatal("file path was accepted as basedir") - } - os.Remove(f.Name()) - - if _, err := NewStorage(dir, "hostname", time.Minute); err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } -} - -// walks a tar.gz and looks for paths with the basename. It does not match -// symlinks properly at this time because that's painful. 
-func walkTar(tarFile string, match string) (bool, error) { - f, err := os.Open(tarFile) - if err != nil { - return false, fmt.Errorf("could not open file: %w", err) - } - defer f.Close() - - gzr, err := gzip.NewReader(f) - if err != nil { - return false, fmt.Errorf("could not unzip file: %w", err) - } - defer gzr.Close() - - tr := tar.NewReader(gzr) - for { - header, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return false, fmt.Errorf("Corrupt tarball reading header: %w", err) - } - - switch header.Typeflag { - case tar.TypeDir, tar.TypeReg: - if filepath.Base(header.Name) == match { - return true, nil - } - default: - // skip - } - } - - return false, nil -} - -func testPatterns(t *testing.T, storage *Storage, artifact sourcev1.Artifact, table ignoreMap) { - for name, expected := range table { - res, err := walkTar(storage.LocalPath(artifact), name) - if err != nil { - t.Fatalf("while reading tarball: %v", err) - } - - if res != expected { - if expected { - t.Fatalf("Could not find repository file matching %q in tarball for repo %q", name, remoteRepository) - } else { - t.Fatalf("Repository contained ignored file %q in tarball for repo %q", name, remoteRepository) - } - } - } -} - -func createArchive(t *testing.T, storage *Storage, filenames []string, sourceIgnore string, spec sourcev1.GitRepositorySpec) sourcev1.Artifact { - gitDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("could not create temporary directory: %v", err) - } - t.Cleanup(func() { os.RemoveAll(gitDir) }) - - if err := exec.Command("git", "clone", remoteRepository, gitDir).Run(); err != nil { - t.Fatalf("Could not clone remote repository: %v", err) - } - - // inject files.. 
just empty files - for _, name := range filenames { - f, err := os.Create(filepath.Join(gitDir, name)) - if err != nil { - t.Fatalf("Could not inject filename %q: %v", name, err) - } - f.Close() - } - - // inject sourceignore if not empty - if sourceIgnore != "" { - si, err := os.Create(filepath.Join(gitDir, ".sourceignore")) - if err != nil { - t.Fatalf("Could not create .sourceignore: %v", err) - } - - if _, err := io.WriteString(si, sourceIgnore); err != nil { - t.Fatalf("Could not write to .sourceignore: %v", err) - } - - si.Close() - } - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)+".tar.gz"), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - - if err := storage.Archive(&artifact, gitDir, spec.Ignore); err != nil { - t.Fatalf("archiving failed: %v", err) - } - - if !storage.ArtifactExist(artifact) { - t.Fatalf("artifact was created but does not exist: %+v", artifact) - } - - return artifact -} - -func stringPtr(s string) *string { - return &s -} - -func TestArchiveBasic(t *testing.T) { - table := ignoreMap{ - "README.md": true, - ".gitignore": false, - } - - dir, err := createStoragePath() - if err != nil { - t.Fatal(err) - } - t.Cleanup(cleanupStoragePath(dir)) - - storage, err := NewStorage(dir, "hostname", time.Minute) - if err != nil { - t.Fatalf("Error while bootstrapping storage: %v", err) - } - - testPatterns(t, storage, createArchive(t, storage, []string{"README.md", ".gitignore"}, "", sourcev1.GitRepositorySpec{}), table) -} - -func TestArchiveIgnore(t *testing.T) { - // this is a list of files that will be created in the repository for each - // subtest. it is manipulated later on. - filenames := []string{ - "foo.tar.gz", - "bar.jpg", - "bar.gif", - "foo.jpeg", - "video.flv", - "video.wmv", - "bar.png", - "foo.zip", - } - - // this is the table of ignored files and their values. 
true means that it's - // present in the resulting tarball. - table := ignoreMap{} - for _, item := range filenames { - table[item] = false - } - - dir, err := createStoragePath() - if err != nil { - t.Fatal(err) - } - t.Cleanup(cleanupStoragePath(dir)) - - storage, err := NewStorage(dir, "hostname", time.Minute) - if err != nil { - t.Fatalf("Error while bootstrapping storage: %v", err) - } - - t.Run("automatically ignored files", func(t *testing.T) { - testPatterns(t, storage, createArchive(t, storage, filenames, "", sourcev1.GitRepositorySpec{}), table) - }) - - table = ignoreMap{} - for _, item := range filenames { - table[item] = true - } - - t.Run("only vcs ignored files", func(t *testing.T) { - testPatterns(t, storage, createArchive(t, storage, filenames, "", sourcev1.GitRepositorySpec{Ignore: stringPtr("")}), table) - }) - - filenames = append(filenames, "test.txt") - table["test.txt"] = false - sourceIgnoreFile := "*.txt" - - t.Run("sourceignore injected via CRD", func(t *testing.T) { - testPatterns(t, storage, createArchive(t, storage, filenames, "", sourcev1.GitRepositorySpec{Ignore: stringPtr(sourceIgnoreFile)}), table) - }) - - table = ignoreMap{} - for _, item := range filenames { - table[item] = false - } - - t.Run("sourceignore injected via filename", func(t *testing.T) { - testPatterns(t, storage, createArchive(t, storage, filenames, sourceIgnoreFile, sourcev1.GitRepositorySpec{}), table) - }) -} - -func TestStorageRemoveAllButCurrent(t *testing.T) { - t.Run("bad directory in archive", func(t *testing.T) { - dir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { os.RemoveAll(dir) }) - - s, err := NewStorage(dir, "hostname", time.Minute) - if err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } - - if err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { - t.Fatal("Did not error while pruning non-existent path") - } - }) -} diff 
--git a/controllers/suite_test.go b/controllers/suite_test.go deleted file mode 100644 index 8bba78892..000000000 --- a/controllers/suite_test.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage - -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) -} - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) - - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } - - var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := ioutil.TempDir("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = NewStorage(tmpStoragePath, "localhost", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - 
Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") - - go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) - }() - - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) - } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -func loadExampleKeys() (err error) { - examplePublicKey, err = ioutil.ReadFile("testdata/certs/server.pem") - if err != nil { - return err - } - examplePrivateKey, err = ioutil.ReadFile("testdata/certs/server-key.pem") - if err != nil { - return err - } - exampleCA, err = ioutil.ReadFile("testdata/certs/ca.pem") - return err -} - -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/controllers/testdata/certs/ca-key.pem b/controllers/testdata/certs/ca-key.pem deleted file mode 100644 index b69de5ab5..000000000 --- a/controllers/testdata/certs/ca-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49 
-AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H -dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA== ------END EC PRIVATE KEY----- diff --git a/controllers/testdata/certs/ca.csr b/controllers/testdata/certs/ca.csr deleted file mode 100644 index baa8aeb26..000000000 --- a/controllers/testdata/certs/ca.csr +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x -PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt -cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU -EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz -b34Wow== ------END CERTIFICATE REQUEST----- diff --git a/controllers/testdata/certs/ca.pem b/controllers/testdata/certs/ca.pem deleted file mode 100644 index 080bd24e6..000000000 --- a/controllers/testdata/certs/ca.pem +++ /dev/null @@ -1,11 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw -NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE -AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ -4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O -1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb -OD8EjjCMY69RMO0= ------END CERTIFICATE----- diff --git a/controllers/testdata/certs/server-key.pem b/controllers/testdata/certs/server-key.pem deleted file mode 100644 index 5054ff39f..000000000 --- a/controllers/testdata/certs/server-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49 
-AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3 -fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg== ------END EC PRIVATE KEY----- diff --git a/controllers/testdata/certs/server.csr b/controllers/testdata/certs/server.csr deleted file mode 100644 index 5caf7b39c..000000000 --- a/controllers/testdata/certs/server.csr +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6 -MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB -Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV ------END CERTIFICATE REQUEST----- diff --git a/controllers/testdata/certs/server.pem b/controllers/testdata/certs/server.pem deleted file mode 100644 index 11c655a0b..000000000 --- a/controllers/testdata/certs/server.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw -NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD -AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB -5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb -bdNmUCzAvVuCAKuMjg2OPrE= ------END CERTIFICATE----- diff --git a/docs/api/source.md b/docs/api/source.md deleted file mode 100644 index c0de25e90..000000000 --- a/docs/api/source.md +++ /dev/null @@ -1,1811 +0,0 @@ -

Source API reference

-

Packages:

- -

source.toolkit.fluxcd.io/v1beta1

-

Package v1beta1 contains API Schema definitions for the source v1beta1 API group

-Resource Types: - -

Bucket -

-

Bucket is the Schema for the buckets API

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-apiVersion
-string
-source.toolkit.fluxcd.io/v1beta1 -
-kind
-string -
-Bucket -
-metadata
- - -Kubernetes meta/v1.ObjectMeta - - -
-Refer to the Kubernetes API documentation for the fields of the -metadata field. -
-spec
- - -BucketSpec - - -
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-provider
- -string - -
-(Optional) -

The S3 compatible storage provider name, default (‘generic’).

-
-bucketName
- -string - -
-

The bucket name.

-
-endpoint
- -string - -
-

The bucket endpoint address.

-
-insecure
- -bool - -
-(Optional) -

Insecure allows connecting to a non-TLS S3 HTTP endpoint.

-
-region
- -string - -
-(Optional) -

The bucket region.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The name of the secret containing authentication credentials -for the Bucket.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check for bucket updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout for download operations, defaults to 20s.

-
-ignore
- -string - -
-(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-status
- - -BucketStatus - - -
-
-
-
-

GitRepository -

-

GitRepository is the Schema for the gitrepositories API

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-apiVersion
-string
-source.toolkit.fluxcd.io/v1beta1 -
-kind
-string -
-GitRepository -
-metadata
- - -Kubernetes meta/v1.ObjectMeta - - -
-Refer to the Kubernetes API documentation for the fields of the -metadata field. -
-spec
- - -GitRepositorySpec - - -
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-url
- -string - -
-

The repository URL, can be a HTTP/S or SSH address.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check for repository updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout for remote Git operations like cloning, defaults to 20s.

-
-ref
- - -GitRepositoryRef - - -
-(Optional) -

The Git reference to checkout and monitor for changes, defaults to -master branch.

-
-verify
- - -GitRepositoryVerification - - -
-(Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

-
-ignore
- -string - -
-(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-gitImplementation
- -string - -
-(Optional) -

Determines which git client library to use. -Defaults to go-git, valid values are (‘go-git’, ‘libgit2’).

-
-
-status
- - -GitRepositoryStatus - - -
-
-
-
-

HelmChart -

-

HelmChart is the Schema for the helmcharts API

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-apiVersion
-string
-source.toolkit.fluxcd.io/v1beta1 -
-kind
-string -
-HelmChart -
-metadata
- - -Kubernetes meta/v1.ObjectMeta - - -
-Refer to the Kubernetes API documentation for the fields of the -metadata field. -
-spec
- - -HelmChartSpec - - -
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - -
-chart
- -string - -
-

The name or path the Helm chart is available at in the SourceRef.

-
-version
- -string - -
-(Optional) -

The chart version semver expression, ignored for charts from GitRepository -and Bucket sources. Defaults to latest when omitted.

-
-sourceRef
- - -LocalHelmChartSourceReference - - -
-

The reference to the Source the chart is available at.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check the Source for updates.

-
-valuesFile
- -string - -
-(Optional) -

Alternative values file to use as the default chart values, expected to be a -relative path in the SourceRef. Ignored when omitted.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-status
- - -HelmChartStatus - - -
-
-
-
-

HelmRepository -

-

HelmRepository is the Schema for the helmrepositories API

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-apiVersion
-string
-source.toolkit.fluxcd.io/v1beta1 -
-kind
-string -
-HelmRepository -
-metadata
- - -Kubernetes meta/v1.ObjectMeta - - -
-Refer to the Kubernetes API documentation for the fields of the -metadata field. -
-spec
- - -HelmRepositorySpec - - -
-
-
- - - - - - - - - - - - - - - - - - - - - -
-url
- -string - -
-

The Helm repository URL, a valid URL contains at least a protocol and host.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The name of the secret containing authentication credentials for the Helm -repository. -For HTTP/S basic auth the secret must contain username and -password fields. -For TLS the secret must contain a certFile and keyFile, and/or -caCert fields.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check the upstream for updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout of index downloading, defaults to 60s.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-status
- - -HelmRepositoryStatus - - -
-
-
-
-

Artifact -

-

-(Appears on: -BucketStatus, -GitRepositoryStatus, -HelmChartStatus, -HelmRepositoryStatus) -

-

Artifact represents the output of a source synchronisation.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-path
- -string - -
-

Path is the relative file path of this artifact.

-
-url
- -string - -
-

URL is the HTTP address of this artifact.

-
-revision
- -string - -
-(Optional) -

Revision is a human readable identifier traceable in the origin source -system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm -chart version, etc.

-
-checksum
- -string - -
-(Optional) -

Checksum is the SHA1 checksum of the artifact.

-
-lastUpdateTime
- - -Kubernetes meta/v1.Time - - -
-

LastUpdateTime is the timestamp corresponding to the last update of this -artifact.

-
-
-
-

BucketSpec -

-

-(Appears on: -Bucket) -

-

BucketSpec defines the desired state of an S3 compatible bucket

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-provider
- -string - -
-(Optional) -

The S3 compatible storage provider name, default (‘generic’).

-
-bucketName
- -string - -
-

The bucket name.

-
-endpoint
- -string - -
-

The bucket endpoint address.

-
-insecure
- -bool - -
-(Optional) -

Insecure allows connecting to a non-TLS S3 HTTP endpoint.

-
-region
- -string - -
-(Optional) -

The bucket region.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The name of the secret containing authentication credentials -for the Bucket.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check for bucket updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout for download operations, defaults to 20s.

-
-ignore
- -string - -
-(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-
-

BucketStatus -

-

-(Appears on: -Bucket) -

-

BucketStatus defines the observed state of a bucket

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-observedGeneration
- -int64 - -
-(Optional) -

ObservedGeneration is the last observed generation.

-
-conditions
- - -[]Kubernetes meta/v1.Condition - - -
-(Optional) -

Conditions holds the conditions for the Bucket.

-
-url
- -string - -
-(Optional) -

URL is the download link for the artifact output of the last Bucket sync.

-
-artifact
- - -Artifact - - -
-(Optional) -

Artifact represents the output of the last successful Bucket sync.

-
-ReconcileRequestStatus
- - -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus - - -
-

-(Members of ReconcileRequestStatus are embedded into this type.) -

-
-
-
-

GitRepositoryRef -

-

-(Appears on: -GitRepositorySpec) -

-

GitRepositoryRef defines the Git ref used for pull and checkout operations.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-branch
- -string - -
-(Optional) -

The Git branch to checkout, defaults to master.

-
-tag
- -string - -
-(Optional) -

The Git tag to checkout, takes precedence over Branch.

-
-semver
- -string - -
-(Optional) -

The Git tag semver expression, takes precedence over Tag.

-
-commit
- -string - -
-(Optional) -

The Git commit SHA to checkout, if specified Tag filters will be ignored.

-
-
-
-

GitRepositorySpec -

-

-(Appears on: -GitRepository) -

-

GitRepositorySpec defines the desired state of a Git repository.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-url
- -string - -
-

The repository URL, can be a HTTP/S or SSH address.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check for repository updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout for remote Git operations like cloning, defaults to 20s.

-
-ref
- - -GitRepositoryRef - - -
-(Optional) -

The Git reference to checkout and monitor for changes, defaults to -master branch.

-
-verify
- - -GitRepositoryVerification - - -
-(Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

-
-ignore
- -string - -
-(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-gitImplementation
- -string - -
-(Optional) -

Determines which git client library to use. -Defaults to go-git, valid values are (‘go-git’, ‘libgit2’).

-
-
-
-

GitRepositoryStatus -

-

-(Appears on: -GitRepository) -

-

GitRepositoryStatus defines the observed state of a Git repository.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-observedGeneration
- -int64 - -
-(Optional) -

ObservedGeneration is the last observed generation.

-
-conditions
- - -[]Kubernetes meta/v1.Condition - - -
-(Optional) -

Conditions holds the conditions for the GitRepository.

-
-url
- -string - -
-(Optional) -

URL is the download link for the artifact output of the last repository -sync.

-
-artifact
- - -Artifact - - -
-(Optional) -

Artifact represents the output of the last successful repository sync.

-
-ReconcileRequestStatus
- - -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus - - -
-

-(Members of ReconcileRequestStatus are embedded into this type.) -

-
-
-
-

GitRepositoryVerification -

-

-(Appears on: -GitRepositorySpec) -

-

GitRepositoryVerification defines the OpenPGP signature verification process.

-
-
- - - - - - - - - - - - - - - - - -
FieldDescription
-mode
- -string - -
-

Mode describes what git object should be verified, currently (‘head’).

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-

The secret name containing the public keys of all trusted Git authors.

-
-
-
-

HelmChartSpec -

-

-(Appears on: -HelmChart) -

-

HelmChartSpec defines the desired state of a Helm chart.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-chart
- -string - -
-

The name or path the Helm chart is available at in the SourceRef.

-
-version
- -string - -
-(Optional) -

The chart version semver expression, ignored for charts from GitRepository -and Bucket sources. Defaults to latest when omitted.

-
-sourceRef
- - -LocalHelmChartSourceReference - - -
-

The reference to the Source the chart is available at.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check the Source for updates.

-
-valuesFile
- -string - -
-(Optional) -

Alternative values file to use as the default chart values, expected to be a -relative path in the SourceRef. Ignored when omitted.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-
-

HelmChartStatus -

-

-(Appears on: -HelmChart) -

-

HelmChartStatus defines the observed state of the HelmChart.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-observedGeneration
- -int64 - -
-(Optional) -

ObservedGeneration is the last observed generation.

-
-conditions
- - -[]Kubernetes meta/v1.Condition - - -
-(Optional) -

Conditions holds the conditions for the HelmChart.

-
-url
- -string - -
-(Optional) -

URL is the download link for the last chart pulled.

-
-artifact
- - -Artifact - - -
-(Optional) -

Artifact represents the output of the last successful chart sync.

-
-ReconcileRequestStatus
- - -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus - - -
-

-(Members of ReconcileRequestStatus are embedded into this type.) -

-
-
-
-

HelmRepositorySpec -

-

-(Appears on: -HelmRepository) -

-

HelmRepositorySpec defines the reference to a Helm repository.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-url
- -string - -
-

The Helm repository URL, a valid URL contains at least a protocol and host.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

The name of the secret containing authentication credentials for the Helm -repository. -For HTTP/S basic auth the secret must contain username and -password fields. -For TLS the secret must contain a certFile and keyFile, and/or -caCert fields.

-
-interval
- - -Kubernetes meta/v1.Duration - - -
-

The interval at which to check the upstream for updates.

-
-timeout
- - -Kubernetes meta/v1.Duration - - -
-(Optional) -

The timeout of index downloading, defaults to 60s.

-
-suspend
- -bool - -
-(Optional) -

This flag tells the controller to suspend the reconciliation of this source.

-
-
-
-

HelmRepositoryStatus -

-

-(Appears on: -HelmRepository) -

-

HelmRepositoryStatus defines the observed state of the HelmRepository.

-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-observedGeneration
- -int64 - -
-(Optional) -

ObservedGeneration is the last observed generation.

-
-conditions
- - -[]Kubernetes meta/v1.Condition - - -
-(Optional) -

Conditions holds the conditions for the HelmRepository.

-
-url
- -string - -
-(Optional) -

URL is the download link for the last index fetched.

-
-artifact
- - -Artifact - - -
-(Optional) -

Artifact represents the output of the last successful repository sync.

-
-ReconcileRequestStatus
- - -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus - - -
-

-(Members of ReconcileRequestStatus are embedded into this type.) -

-
-
-
-

LocalHelmChartSourceReference -

-

-(Appears on: -HelmChartSpec) -

-

LocalHelmChartSourceReference contains enough information to let you locate -the typed referenced object at namespace level.

-
-
- - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-apiVersion
- -string - -
-(Optional) -

APIVersion of the referent.

-
-kind
- -string - -
-

Kind of the referent, valid values are (‘HelmRepository’, ‘GitRepository’, -‘Bucket’).

-
-name
- -string - -
-

Name of the referent.

-
-
-
-

Source -

-

Source interface must be supported by all API types.

-
-

This page was automatically generated with gen-crd-api-reference-docs

-
diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md new file mode 100644 index 000000000..935d74275 --- /dev/null +++ b/docs/api/v1/source.md @@ -0,0 +1,3724 @@ +

Source API reference v1

+

Packages:

+ +

source.toolkit.fluxcd.io/v1

+

Package v1 contains API Schema definitions for the source v1 API group.

+Resource Types: + +

Bucket +

+

Bucket is the Schema for the buckets API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+Bucket +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +BucketSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+
+status
+ + +BucketStatus + + +
+
+
+
+

GitRepository +

+

GitRepository is the Schema for the gitrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+GitRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +GitRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+status
+ + +GitRepositoryStatus + + +
+
+
+
+

HelmChart +

+

HelmChart is the Schema for the helmcharts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmChart +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmChartSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+status
+ + +HelmChartStatus + + +
+
+
+
+

HelmRepository +

+

HelmRepository is the Schema for the helmrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+status
+ + +HelmRepositoryStatus + + +
+
+
+
+

OCIRepository +

+

OCIRepository is the Schema for the ocirepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+OCIRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +OCIRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+status
+ + +OCIRepositoryStatus + + +
+
+
+
+

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+
+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec specifies the required configuration to produce an Artifact for +an object storage bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus records the observed state of a Bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the Bucket object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful Bucket reconciliation.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

ExternalArtifact +

+

ExternalArtifact is the Schema for the external artifacts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +ExternalArtifactSpec + + +
+
+
+ + + + + +
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+status
+ + +ExternalArtifactStatus + + +
+
+
+
+

ExternalArtifactSpec +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactSpec defines the desired state of an ExternalArtifact.

+
+
+ + + + + + + + + + + + + +
FieldDescription
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+
+

ExternalArtifactStatus +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactStatus defines the observed state of an ExternalArtifact.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of an ExternalArtifact reconciliation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the ExternalArtifact.

+
+
+
+

GitRepositoryInclude +

+

+(Appears on: +GitRepositorySpec, +GitRepositoryStatus) +

+

GitRepositoryInclude specifies a local reference to a GitRepository which +Artifact (sub-)contents must be included, and where they should be placed.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+repository
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

GitRepositoryRef specifies the GitRepository which Artifact contents +must be included.

+
+fromPath
+ +string + +
+(Optional) +

FromPath specifies the path to copy contents from, defaults to the root +of the Artifact.

+
+toPath
+ +string + +
+(Optional) +

ToPath specifies the path to copy contents to, defaults to the name of +the GitRepositoryRef.

+
+
+
+

GitRepositoryRef +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryRef specifies the Git reference to resolve and checkout.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+branch
+ +string + +
+(Optional) +

Branch to check out, defaults to ‘master’ if no other field is defined.

+
+tag
+ +string + +
+(Optional) +

Tag to check out, takes precedence over Branch.

+
+semver
+ +string + +
+(Optional) +

SemVer tag expression to check out, takes precedence over Tag.

+
+name
+ +string + +
+(Optional) +

Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

+

It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description +Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

+
+commit
+ +string + +
+(Optional) +

Commit SHA to check out, takes precedence over all reference fields.

+

This can be combined with Branch to shallow clone the branch, in which +the commit is expected to exist.

+
+
+
+

GitRepositorySpec +

+

+(Appears on: +GitRepository) +

+

GitRepositorySpec specifies the required configuration to produce an +Artifact for a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+
+

GitRepositoryStatus +

+

+(Appears on: +GitRepository) +

+

GitRepositoryStatus records the observed state of a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the GitRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the GitRepository.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful GitRepository reconciliation.

+
+includedArtifacts
+ + +[]github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

IncludedArtifacts contains a list of the last successfully included +Artifacts as instructed by GitRepositorySpec.Include.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedRecurseSubmodules
+ +bool + +
+(Optional) +

ObservedRecurseSubmodules is the observed resource submodules +configuration used to produce the current Artifact.

+
+observedInclude
+ + +[]GitRepositoryInclude + + +
+(Optional) +

ObservedInclude is the observed list of GitRepository resources used to +produce the current Artifact.

+
+observedSparseCheckout
+ +[]string + +
+(Optional) +

ObservedSparseCheckout is the observed list of directories used to +produce the current Artifact.

+
+sourceVerificationMode
+ + +GitVerificationMode + + +
+(Optional) +

SourceVerificationMode is the last used verification mode indicating +which Git object(s) have been verified.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

GitRepositoryVerification +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryVerification specifies the Git commit signature verification +strategy.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mode
+ + +GitVerificationMode + + +
+(Optional) +

Mode specifies which Git object(s) should be verified.

+

The variants “head” and “HEAD” both imply the same thing, i.e. verify +the commit that the HEAD of the Git repository points to. The variant +“head” solely exists to ensure backwards compatibility.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

SecretRef specifies the Secret containing the public keys of trusted Git +authors.

+
+
+
+

GitVerificationMode +(string alias)

+

+(Appears on: +GitRepositoryStatus, +GitRepositoryVerification) +

+

GitVerificationMode specifies the verification mode for a Git repository.

+

HelmChartSpec +

+

+(Appears on: +HelmChart) +

+

HelmChartSpec specifies the desired state of a Helm chart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+
+

HelmChartStatus +

+

+(Appears on: +HelmChart) +

+

HelmChartStatus records the observed state of the HelmChart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmChart +object.

+
+observedSourceArtifactRevision
+ +string + +
+(Optional) +

ObservedSourceArtifactRevision is the last observed Artifact.Revision +of the HelmChartSpec.SourceRef.

+
+observedChartName
+ +string + +
+(Optional) +

ObservedChartName is the last observed chart name as specified by the +resolved chart reference.

+
+observedValuesFiles
+ +[]string + +
+(Optional) +

ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmChart.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +HelmChartStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

HelmRepositorySpec +

+

+(Appears on: +HelmRepository) +

+

HelmRepositorySpec specifies the required configuration to produce an +Artifact for a Helm repository index YAML.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+
+

HelmRepositoryStatus +

+

+(Appears on: +HelmRepository) +

+

HelmRepositoryStatus records the observed state of the HelmRepository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmRepository.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +HelmRepositoryStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful HelmRepository reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

LocalHelmChartSourceReference +

+

+(Appears on: +HelmChartSpec) +

+

LocalHelmChartSourceReference contains enough information to let you locate +the typed referenced object at namespace level.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+ +string + +
+(Optional) +

APIVersion of the referent.

+
+kind
+ +string + +
+

Kind of the referent, valid values are (‘HelmRepository’, ‘GitRepository’, +‘Bucket’).

+
+name
+ +string + +
+

Name of the referent.

+
+
+
+

OCILayerSelector +

+

+(Appears on: +OCIRepositorySpec, +OCIRepositoryStatus) +

+

OCILayerSelector specifies which layer should be extracted from an OCI Artifact

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mediaType
+ +string + +
+(Optional) +

MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

+
+operation
+ +string + +
+(Optional) +

Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

+
+
+
+

OCIRepositoryRef +

+

+(Appears on: +OCIRepositorySpec) +

+

OCIRepositoryRef defines the image reference for the OCIRepository’s URL

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+digest
+ +string + +
+(Optional) +

Digest is the image digest to pull, takes precedence over SemVer. +The value should be in the format ‘sha256:&lt;HASH&gt;’.

+
+semver
+ +string + +
+(Optional) +

SemVer is the range of tags to pull selecting the latest within +the range, takes precedence over Tag.

+
+semverFilter
+ +string + +
+(Optional) +

SemverFilter is a regex pattern to filter the tags within the SemVer range.

+
+tag
+ +string + +
+(Optional) +

Tag is the image tag to pull, defaults to latest.

+
+
+
+

OCIRepositorySpec +

+

+(Appears on: +OCIRepository) +

+

OCIRepositorySpec defines the desired state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+
+

OCIRepositoryStatus +

+

+(Appears on: +OCIRepository) +

+

OCIRepositoryStatus defines the observed state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the OCIRepository.

+
+url
+ +string + +
+(Optional) +

URL is the download link for the artifact output of the last OCI Repository sync.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful OCI Repository sync.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedLayerSelector
+ + +OCILayerSelector + + +
+(Optional) +

ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

OCIRepositoryVerification +

+

+(Appears on: +HelmChartSpec, +OCIRepositorySpec) +

+

OCIRepositoryVerification verifies the authenticity of an OCI Artifact

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider specifies the technology used to sign the OCI Artifact.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Kubernetes Secret containing the +trusted public keys.

+
+matchOIDCIdentity
+ + +[]OIDCIdentityMatch + + +
+(Optional) +

MatchOIDCIdentity specifies the identity matching criteria to use +while verifying an OCI artifact which was signed using Cosign keyless +signing. The artifact’s identity is deemed to be verified if any of the +specified matchers match against the identity.

+
+
+
+

OIDCIdentityMatch +

+

+(Appears on: +OCIRepositoryVerification) +

+

OIDCIdentityMatch specifies options for verifying the certificate identity, +i.e. the issuer and the subject of the certificate.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+issuer
+ +string + +
+

Issuer specifies the regex pattern to match against to verify +the OIDC issuer in the Fulcio certificate. The pattern must be a +valid Go regular expression.

+
+subject
+ +string + +
+

Subject specifies the regex pattern to match against to verify +the identity subject in the Fulcio certificate. The pattern must +be a valid Go regular expression.

+
+
+
+

Source +

+

Source interface must be supported by all API types. +Source is the interface that provides generic access to the Artifact and +interval. It must be supported by all kinds of the source.toolkit.fluxcd.io +API group.

+
+

This page was automatically generated with gen-crd-api-reference-docs

+
diff --git a/docs/api/v1beta2/source.md b/docs/api/v1beta2/source.md new file mode 100644 index 000000000..8234f7014 --- /dev/null +++ b/docs/api/v1beta2/source.md @@ -0,0 +1,3604 @@ +

Source API reference v1beta2

+

Packages:

+ +

source.toolkit.fluxcd.io/v1beta2

+

Package v1beta2 contains API Schema definitions for the source v1beta2 API group

+Resource Types: + +

Bucket +

+

Bucket is the Schema for the buckets API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1beta2 +
+kind
+string +
+Bucket +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +BucketSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+
+status
+ + +BucketStatus + + +
+
+
+
+

GitRepository +

+

GitRepository is the Schema for the gitrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1beta2 +
+kind
+string +
+GitRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +GitRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which to check the GitRepository for updates.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+gitImplementation
+ +string + +
+(Optional) +

GitImplementation specifies which Git client library implementation to +use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’). +Deprecated: gitImplementation is deprecated now that ‘go-git’ is the +only supported implementation.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+
+status
+ + +GitRepositoryStatus + + +
+
+
+
+

HelmChart +

+

HelmChart is the Schema for the helmcharts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1beta2 +
+kind
+string +
+HelmChart +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmChartSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+valuesFile
+ +string + +
+(Optional) +

ValuesFile is an alternative values file to use as the default chart +values, expected to be a relative path in the SourceRef. Deprecated in +favor of ValuesFiles, for backwards compatibility the file specified here +is merged before the ValuesFiles items. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+verify
+ + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+status
+ + +HelmChartStatus + + +
+
+
+
+

HelmRepository +

+

HelmRepository is the Schema for the helmrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1beta2 +
+kind
+string +
+HelmRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+status
+ + +HelmRepositoryStatus + + +
+
+
+
+

OCIRepository +

+

OCIRepository is the Schema for the ocirepositories API

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1beta2 +
+kind
+string +
+OCIRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +OCIRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

Note: Support for the caFile, certFile and keyFile keys have +been deprecated.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+status
+ + +OCIRepositoryStatus + + +
+
+
+
+

Artifact +

+

Artifact represents the output of a Source reconciliation.

+

Deprecated: use Artifact from api/v1 instead. This type will be removed in +a future release.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+path
+ +string + +
+

Path is the relative file path of the Artifact. It can be used to locate +the file in the root of the Artifact storage on the local file system of +the controller managing the Source.

+
+url
+ +string + +
+

URL is the HTTP address of the Artifact as exposed by the controller +managing the Source. It can be used to retrieve the Artifact for +consumption, e.g. by another controller applying the Artifact contents.

+
+revision
+ +string + +
+(Optional) +

Revision is a human-readable identifier traceable in the origin source +system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.

+
+checksum
+ +string + +
+(Optional) +

Checksum is the SHA256 checksum of the Artifact file. +Deprecated: use Artifact.Digest instead.

+
+digest
+ +string + +
+(Optional) +

&#13;Digest is the digest of the file in the form of ‘&lt;algorithm&gt;:&lt;checksum&gt;’.

+
+lastUpdateTime
+ + +Kubernetes meta/v1.Time + + +
+

LastUpdateTime is the timestamp corresponding to the last update of the +Artifact.

+
+size
+ +int64 + +
+(Optional) +

Size is the number of bytes in the file.

+
+metadata
+ +map[string]string + +
+(Optional) +

Metadata holds upstream information such as OCI annotations.

+
+
+
+

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+
+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec specifies the required configuration to produce an Artifact for +an object storage bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus records the observed state of a Bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the Bucket object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

Artifact represents the last successful Bucket reconciliation.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

GitRepositoryInclude +

+

+(Appears on: +GitRepositorySpec, +GitRepositoryStatus) +

+

GitRepositoryInclude specifies a local reference to a GitRepository which +Artifact (sub-)contents must be included, and where they should be placed.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+repository
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

GitRepositoryRef specifies the GitRepository which Artifact contents +must be included.

+
+fromPath
+ +string + +
+(Optional) +

FromPath specifies the path to copy contents from, defaults to the root +of the Artifact.

+
+toPath
+ +string + +
+(Optional) +

ToPath specifies the path to copy contents to, defaults to the name of +the GitRepositoryRef.

+
+
+
+

GitRepositoryRef +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryRef specifies the Git reference to resolve and checkout.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+branch
+ +string + +
+(Optional) +

Branch to check out, defaults to ‘master’ if no other field is defined.

+
+tag
+ +string + +
+(Optional) +

Tag to check out, takes precedence over Branch.

+
+semver
+ +string + +
+(Optional) +

SemVer tag expression to check out, takes precedence over Tag.

+
+name
+ +string + +
+(Optional) +

Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

+

It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description +Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

+
+commit
+ +string + +
+(Optional) +

Commit SHA to check out, takes precedence over all reference fields.

+

This can be combined with Branch to shallow clone the branch, in which +the commit is expected to exist.

+
+
+
+

GitRepositorySpec +

+

+(Appears on: +GitRepository) +

+

GitRepositorySpec specifies the required configuration to produce an +Artifact for a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which to check the GitRepository for updates.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+gitImplementation
+ +string + +
+(Optional) +

GitImplementation specifies which Git client library implementation to +use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’). +Deprecated: gitImplementation is deprecated now that ‘go-git’ is the +only supported implementation.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+
+
+

GitRepositoryStatus +

+

+(Appears on: +GitRepository) +

+

GitRepositoryStatus records the observed state of a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the GitRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the GitRepository.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +GitRepositoryStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

Artifact represents the last successful GitRepository reconciliation.

+
+includedArtifacts
+ + +[]github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

IncludedArtifacts contains a list of the last successfully included +Artifacts as instructed by GitRepositorySpec.Include.

+
+contentConfigChecksum
+ +string + +
+(Optional) +

ContentConfigChecksum is a checksum of all the configurations related to +the content of the source artifact: +- .spec.ignore +- .spec.recurseSubmodules +- .spec.included and the checksum of the included artifacts +observed in .status.observedGeneration version of the object. This can +be used to determine if the content of the included repository has +changed. +It has the format of <algo>:<checksum>, for example: sha256:<checksum>.

+

Deprecated: Replaced with explicit fields for observed artifact content +config in the status.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedRecurseSubmodules
+ +bool + +
+(Optional) +

ObservedRecurseSubmodules is the observed resource submodules +configuration used to produce the current Artifact.

+
+observedInclude
+ + +[]GitRepositoryInclude + + +
+(Optional) +

&#13;ObservedInclude is the observed list of GitRepository resources used +to produce the current Artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

GitRepositoryVerification +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryVerification specifies the Git commit signature verification +strategy.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mode
+ +string + +
+

Mode specifies what Git object should be verified, currently (‘head’).

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

SecretRef specifies the Secret containing the public keys of trusted Git +authors.

+
+
+
+

HelmChartSpec +

+

+(Appears on: +HelmChart) +

+

HelmChartSpec specifies the desired state of a Helm chart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+valuesFile
+ +string + +
+(Optional) +

ValuesFile is an alternative values file to use as the default chart +values, expected to be a relative path in the SourceRef. Deprecated in +favor of ValuesFiles, for backwards compatibility the file specified here +is merged before the ValuesFiles items. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+verify
+ + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+
+

HelmChartStatus +

+

+(Appears on: +HelmChart) +

+

HelmChartStatus records the observed state of the HelmChart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmChart +object.

+
+observedSourceArtifactRevision
+ +string + +
+(Optional) +

ObservedSourceArtifactRevision is the last observed Artifact.Revision +of the HelmChartSpec.SourceRef.

+
+observedChartName
+ +string + +
+(Optional) +

ObservedChartName is the last observed chart name as specified by the +resolved chart reference.

+
+observedValuesFiles
+ +[]string + +
+(Optional) +

ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmChart.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

HelmRepositorySpec +

+

+(Appears on: +HelmRepository) +

+

HelmRepositorySpec specifies the required configuration to produce an +Artifact for a Helm repository index YAML.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+
+

HelmRepositoryStatus +

+

+(Appears on: +HelmRepository) +

+

HelmRepositoryStatus records the observed state of the HelmRepository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmRepository.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +HelmRepositoryStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

Artifact represents the last successful HelmRepository reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

LocalHelmChartSourceReference +

+

+(Appears on: +HelmChartSpec) +

+

LocalHelmChartSourceReference contains enough information to let you locate +the typed referenced object at namespace level.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+ +string + +
+(Optional) +

APIVersion of the referent.

+
+kind
+ +string + +
+

Kind of the referent, valid values are (‘HelmRepository’, ‘GitRepository’, +‘Bucket’).

+
+name
+ +string + +
+

Name of the referent.

+
+
+
+

OCILayerSelector +

+

+(Appears on: +OCIRepositorySpec, +OCIRepositoryStatus) +

+

OCILayerSelector specifies which layer should be extracted from an OCI Artifact

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mediaType
+ +string + +
+(Optional) +

MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

+
+operation
+ +string + +
+(Optional) +

Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

+
+
+
+

OCIRepositoryRef +

+

+(Appears on: +OCIRepositorySpec) +

+

OCIRepositoryRef defines the image reference for the OCIRepository’s URL

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+digest
+ +string + +
+(Optional) +

&#13;Digest is the image digest to pull, takes precedence over SemVer. +The value should be in the format ‘sha256:&lt;checksum&gt;’.

+
+semver
+ +string + +
+(Optional) +

SemVer is the range of tags to pull selecting the latest within +the range, takes precedence over Tag.

+
+semverFilter
+ +string + +
+(Optional) +

SemverFilter is a regex pattern to filter the tags within the SemVer range.

+
+tag
+ +string + +
+(Optional) +

Tag is the image tag to pull, defaults to latest.

+
+
+
+

OCIRepositorySpec +

+

+(Appears on: +OCIRepository) +

+

OCIRepositorySpec defines the desired state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

Note: Support for the caFile, certFile and keyFile keys have +been deprecated.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+
+

OCIRepositoryStatus +

+

+(Appears on: +OCIRepository) +

+

OCIRepositoryStatus defines the observed state of OCIRepository

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the OCIRepository.

+
+url
+ +string + +
+(Optional) +

URL is the download link for the artifact output of the last OCI Repository sync.

+
+artifact
+ + +github.com/fluxcd/source-controller/api/v1.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful OCI Repository sync.

+
+contentConfigChecksum
+ +string + +
+(Optional) +

ContentConfigChecksum is a checksum of all the configurations related to +the content of the source artifact: +- .spec.ignore +- .spec.layerSelector +observed in .status.observedGeneration version of the object. This can +be used to determine if the content configuration has changed and the +artifact needs to be rebuilt. +It has the format of <algo>:<checksum>, for example: sha256:<checksum>.

+

Deprecated: Replaced with explicit fields for observed artifact content +config in the status.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedLayerSelector
+ + +OCILayerSelector + + +
+(Optional) +

ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

Source +

+

The Source interface must be supported by all API types. +Source is the interface that provides generic access to the Artifact and +interval. It must be supported by all kinds in the source.toolkit.fluxcd.io +API group.

+

Deprecated: use the Source interface from api/v1 instead. This type will be +removed in a future release.

+
+

This page was automatically generated with gen-crd-api-reference-docs

+
diff --git a/docs/spec/README.md b/docs/spec/README.md index 16ca15fd5..ed8cd38f3 100644 --- a/docs/spec/README.md +++ b/docs/spec/README.md @@ -1,65 +1,7 @@ # Source Controller -The main goal is to define a set of Kubernetes objects that cluster -admins and various automated operators can interact with to offload -the sources (e.g. Git and Helm repositories) registration, authentication, -verification and resource fetching to a dedicated controller. - -## Motivation - -Each Flux and each Helm operator mirrors the Git repositories they are -using, in the same way, using the same code. But other components -might benefit from access to the source mirrors, and Flux and the Helm -operator could work more in sympathy with Kubernetes by factoring it out. - -If "sources" (usually git repos, but also Helm charts and potentially -other things) existed in their own right as Kubernetes resources, -components like Flux and Helm operator could use standard Kubernetes -mechanisms to build on them; and, they could be managed independently -of the components using them. - ## API Specification +* [v1](v1/README.md) +* [v1beta2](v1beta2/README.md) * [v1beta1](v1beta1/README.md) - -## Implementation - -The controller implementation will watch for source objects in a cluster and act on them. 
-The actions performed by the source controller could be: - -* validate source definitions -* authenticate to sources and validate authenticity -* detect source changes based on update policies (semver) -* fetch resources on-demand and on-a-schedule -* package the fetched resources into a well known format (tar.gz, yaml) -* store the artifacts locally -* make the artifacts addressable by their source identifier (sha, version, ts) -* make the artifacts available in-cluster to interested 3rd parties -* notify interested 3rd parties of source changes and availability (status conditions, events, hooks) - -## Impact to Flux - -Having a dedicated controller that manages Git repositories defined with Kubernetes custom resources would: - -* simplify Flux configuration as fluxd could subscribe to Git sources in-cluster and pull the artifacts -automatically without manual intervention from users to reconfigure and redeploy FLux -* improve the installation experience as users will not have to patch fluxd's deployment to inject -the HTTPS basic auth credentials, change the source URL or other Git and PGP related settings -* enable fluxd to compose the desired state of a cluster from multiple sources by applying all artifacts present in flux namespace -* enable fluxd to apply manifests coming from other sources than Git, e.g. 
S3 buckets -* allow fluxd to run under a non-root user as it wouldn't need to shell out to ssh-keygen, git or pgp -* enable fluxd to apply manifests coming from the most recent semver tag of a Git repository -* allow user to pin the cluster desired state to a specific Git commit or Git tag - -## Impact to Helm Operator - -Having a dedicated controller that manages Helm repositories and charts defined with Kubernetes custom -resources would: - -* simplify the Helm Operator configuration as repository and chart definitions can be re-used across - `HelmRelease` resources (see [fluxcd/helm-operator#142](https://github.com/fluxcd/helm-operator/issues/142)) -* improve the user experience as repositories requiring authentication will no longer require a - `repositories.yaml` import / file mount -* simplify the architecture of the Helm Operator as it allows the operator to work with a single - source type (`HelmChart`) and way of preparing and executing installations and/or upgrades -* allow the Helm Operator to run under a non-root user as it wouldn't need to shell out to git diff --git a/docs/spec/v1/README.md b/docs/spec/v1/README.md new file mode 100644 index 000000000..f08ea805f --- /dev/null +++ b/docs/spec/v1/README.md @@ -0,0 +1,22 @@ +# source.toolkit.fluxcd.io/v1 + +This is the v1 API specification for defining the desired state sources of Kubernetes clusters. 
+ +## Specification + +* Source kinds: + + [GitRepository](gitrepositories.md) + + [OCIRepository](ocirepositories.md) + + [HelmRepository](helmrepositories.md) + + [HelmChart](helmcharts.md) + + [Bucket](buckets.md) + +## Implementation + +* [source-controller](https://github.com/fluxcd/source-controller/) + +## Consumers + +* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/) +* [helm-controller](https://github.com/fluxcd/helm-controller/) +* [source-watcher](https://github.com/fluxcd/source-watcher/) diff --git a/docs/spec/v1/buckets.md b/docs/spec/v1/buckets.md new file mode 100644 index 000000000..077ac952b --- /dev/null +++ b/docs/spec/v1/buckets.md @@ -0,0 +1,1433 @@ +# Buckets + + + +The `Bucket` API defines a Source to produce an Artifact for objects from storage +solutions like Amazon S3, Google Cloud Storage buckets, or any other solution +with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. + +## Example + +The following is an example of a Bucket. It creates a tarball (`.tar.gz`) +Artifact with the fetched objects from an object storage with an S3 +compatible API (e.g. [Minio](https://min.io)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: minio-bucket + namespace: default +spec: + interval: 5m0s + endpoint: minio.example.com + insecure: true + secretRef: + name: minio-bucket-secret + bucketName: example +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-bucket-secret + namespace: default +type: Opaque +stringData: + accesskey: + secretkey: +``` + +In the above example: + +- A Bucket named `minio-bucket` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the object storage bucket every five minutes, + indicated by the `.spec.interval` field. 
+- It authenticates to the `minio.example.com` endpoint with + the static credentials from the `minio-secret` Secret data, indicated by + the `.spec.endpoint` and `.spec.secretRef.name` fields. +- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag) + in the `.spec.bucketName` bucket is compiled, while filtering the keys using + [default ignore rules](#default-exclusions). +- The digest (algorithm defaults to SHA256) of the list is used as Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current Bucket revision differs from the latest calculated revision, + all objects are fetched and archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `bucket.yaml`, and +changing the Bucket and Secret values to target a Minio instance you have +control over. + +**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see +[Provider](#provider). + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f bucket.yaml + ``` + +2. Run `kubectl get buckets` to see the Bucket: + + ```console + NAME ENDPOINT AGE READY STATUS + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ``` + +3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the Bucket's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Last Update Time: 2024-02-01T23:43:38Z + Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Conditions: + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket + ``` + +## Writing a Bucket spec + +As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and +`metadata` fields. The name of a Bucket object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A Bucket also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). 
+ +### Provider + +The `.spec.provider` field allows for specifying a Provider to enable provider +specific configurations, for example to communicate with a non-S3 compatible +API endpoint, or to change the authentication method. + +Supported options are: + +- [Generic](#generic) +- [AWS](#aws) +- [Azure](#azure) +- [GCP](#gcp) + +If you do not specify `.spec.provider`, it defaults to `generic`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Generic + +When a Bucket's `spec.provider` is set to `generic`, the controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go), which can communicate +with any Amazon S3 compatible object storage (including +[GCS](https://cloud.google.com/storage/docs/interoperability), +[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-), +and many others). + +The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a +Secret with `.data.accesskey` and `.data.secretkey` values, used to +authenticate with static credentials. + +The Provider allows for specifying a region the bucket is in using the +[`.spec.region` field](#region), if required by the [Endpoint](#endpoint). 
+ +##### Generic example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: generic-insecure + namespace: default +spec: + provider: generic + interval: 5m0s + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + timeout: 60s + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### AWS + +When a Bucket's `.spec.provider` field is set to `aws`, the source-controller +will attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go). + +Without a [Secret reference](#secret-reference), authorization using +credentials retrieved from the AWS EC2 service is attempted by default. When +a reference is specified, it expects a Secret with `.data.accesskey` and +`.data.secretkey` values, used to authenticate with static credentials. + +The Provider allows for specifying the +[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) +using the [`.spec.region` field](#region). + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/aws/#for-amazon-simple-storage-service + +##### AWS EC2 example + +**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for +the source-controller service account that grants access to the bucket. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS IAM role example + +Replace `` with the specified `.spec.bucketName`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} +``` + +##### AWS static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + secretRef: + name: aws-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: aws-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +##### AWS Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + serviceAccountName: aws-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-workload-identity-sa + namespace: default + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/flux-bucket-role +``` + +#### Azure + +When a Bucket's `.spec.provider` is set to `azure`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Azure Blob Storage SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob). + +Without a [Secret reference](#secret-reference), authentication using a chain +with: + +- [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential) +- [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) + with the `AZURE_CLIENT_ID` +- Managed Identity with a system-assigned identity + +is attempted by default. If no chain can be established, the bucket +is assumed to be publicly reachable. + +When a reference is specified, it expects a Secret with one of the following +sets of `.data` fields: + +- `tenantId`, `clientId` and `clientSecret` for authenticating a Service + Principal with a secret. +- `tenantId`, `clientId` and `clientCertificate` (plus optionally + `clientCertificatePassword` and/or `clientCertificateSendChain`) for + authenticating a Service Principal with a certificate. +- `clientId` for authenticating using a Managed Identity. 
+- `accountKey` for authenticating using a + [Shared Key](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#SharedKeyCredential). +- `sasKey` for authenticating using a [SAS Token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) + +For any Managed Identity and/or Microsoft Entra ID (Formerly Azure Active Directory) authentication method, +the base URL can be configured using `.data.authorityHost`. If not supplied, +[`AzurePublicCloud` is assumed](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AuthorityHost). + +##### Azure example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-public + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: podinfo + endpoint: https://podinfoaccount.blob.core.windows.net + timeout: 30s +``` + +##### Azure Service Principal Secret example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-secret + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientSecret: +``` + +##### Azure Service Principal Certificate example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-cert + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientCertificate: + # Plus optionally + clientCertificatePassword: + clientCertificateSendChain: # either "1" or "true" +``` + +##### Azure Managed 
Identity with Client ID example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-managed-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-smi-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-smi-auth + namespace: default +type: Opaque +data: + clientId: +``` + +##### Azure Blob Shared Key example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-shared-key + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + accountKey: +``` + +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net +``` + +##### Azure Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net + serviceAccountName: azure-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-workload-identity-sa + namespace: default + annotations: + azure.workload.identity/client-id: + azure.workload.identity/tenant-id: +``` + +##### Azure Blob SAS Token example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-sas-token + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + sasKey: +``` + +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. 
+ +The minimum permissions for an account-level SAS token are: + +- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. + +#### GCP + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/gcp/#for-google-cloud-storage + +##### GCP Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + serviceAccountName: gcp-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: gcp-workload-identity-sa + namespace: default + annotations: + iam.gke.io/gcp-service-account: +``` + +##### GCP static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-secret + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: + endpoint: storage.googleapis.com + region: + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: default +type: Opaque +data: + serviceaccount: +``` + +Where the (base64 decoded) value of `.data.serviceaccount` looks like this: + +```json +{ + "type": "service_account", + "project_id": "example", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@example.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" +} +``` + +### Interval + +`.spec.interval` is a required field that specifies the interval which the +object storage bucket must be consulted at. + +After successfully reconciling a Bucket object, the source-controller requeues +the object for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the object storage bucket every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. the apply of a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Endpoint + +`.spec.endpoint` is a required field that specifies the HTTP/S object storage +endpoint to connect to and fetch objects from. Connecting to an (insecure) +HTTP endpoint requires enabling [`.spec.insecure`](#insecure). + +Some endpoints require the specification of a [`.spec.region`](#region), +see [Provider](#provider) for more (provider specific) examples. + +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. By adding this field, one may specify the +STS endpoint from where temporary credentials will be fetched. + +This field is only supported for the `aws` and `generic` bucket [providers](#provider). 
+ +If using `.spec.sts`, the following fields are required: + +- `.spec.sts.provider`, the Security Token Service provider. The only supported + option for the `generic` bucket provider is `ldap`. The only supported option + for the `aws` bucket provider is `aws`. +- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In + the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS + Endpoint, or an Interface Endpoint created inside a VPC. In the case of + `ldap` this must be the LDAP server endpoint. + +When using the `ldap` provider, the following fields may also be specified: + +- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP + credentials. The Secret must contain the following keys: + - `username`, the username to authenticate with. + - `password`, the password to authenticate with. +- `.spec.sts.certSecretRef.name`, the name of the Secret containing the + TLS configuration for communicating with the STS endpoint. The contents + of this Secret must follow the same structure as + [`.spec.certSecretRef.name`](#cert-secret-reference). + +If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified, +the proxy configuration will be used for communicating with the STS endpoint. 
+ +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Bucket name + +`.spec.bucketName` is a required field that specifies which object storage +bucket on the [Endpoint](#endpoint) objects should be fetched from. + +See [Provider](#provider) for more (provider specific) examples. + +### Region + +`.spec.region` is an optional field to specify the region a +[`.spec.bucketName`](#bucket-name) is located in. + +See [Provider](#provider) for more (provider specific) examples. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a bucket using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +[endpoint](#endpoint), if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for object storage +fetch operations. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. +The default value is `60s`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the Bucket, containing authentication +credentials for the object storage. For some `.spec.provider` implementations +the presence of the field is required, see [Provider](#provider) for more +details and examples. + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as Bucket with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. 
+ +**Note:** that for a publicly accessible object storage, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +**Important:** `.spec.secretRef` and `.spec.serviceAccountName` are mutually +exclusive and cannot be set at the same time. This constraint is enforced +at the CRD level. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [provider](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage +objects whose keys match the defined rules are excluded while fetching. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket. +When set to `true`, the controller will stop reconciling the Bucket, and changes +to the resource or in the object storage bucket will not result in a new +Artifact. When the field is set to `false` or removed, it will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with Buckets + +### Excluding files + +By default, storage bucket objects which match the [default exclusion +rules](#default-exclusions) are excluded while fetching. It is possible to +overwrite and/or overrule the default exclusions using a file in the bucket +and/or an in-spec set of rules. 
+ +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the root of the +object storage bucket. The `.sourceignore` file follows [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +#### Ignore spec + +Another option is to define the exclusions within the Bucket spec, using the +[`.spec.ignore` field](#ignore). Specified rules override the +[default exclusion list](#default-exclusions), and may overrule `.sourceignore` +file exclusions. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a Bucket outside the +[specified interval window](#interval), a Bucket can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the Bucket for reconciliation if the `` differs from +the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite bucket/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source bucket +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the Bucket to reach a +[ready state](#ready-bucket) using `kubectl`: + +```sh +kubectl wait bucket/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a Bucket, you can suspend it using the [`.spec.suspend` +field](#suspend). 
+ +#### Suspend a Bucket + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source bucket +``` + +**Note:** When a Bucket has an Artifact and is suspended, and this Artifact +later disappears from the storage due to e.g. the source-controller Pod being +evicted from a Node, this will not be reflected in the Bucket's Status until it +is resumed. + +#### Resume a Bucket + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}' +``` + +Using `flux`: + +```sh +flux resume source bucket +``` + +### Debugging a Bucket + +There are several ways to gather information about a Bucket for debugging +purposes. + +#### Describe the Bucket + +Describing a Bucket using `kubectl describe bucket ` displays the +latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2024-02-02T13:26:55Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: False + Type: Ready + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning BucketOperationFailed 37s (x11 over 42s) source-controller bucket 'my-new-bucket' does not exist +``` + +#### Trace emitted Events + +To view events for specific Bucket(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for Bucket/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m30s Normal NewArtifact bucket/ fetched 16 files with revision from 'my-new-bucket' +36s Normal ArtifactUpToDate bucket/ artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +18s Warning BucketOperationFailed bucket/ bucket 'my-new-bucket' does not exist +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=`. + +## Bucket Status + +### Artifact + +The Bucket reports the latest synchronized state from the object storage +bucket as an Artifact object in the `.status.artifact` of the resource. 
+ +The Artifact file is a gzip compressed TAR archive +(`.tar.gz`), and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +status: + artifact: + digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a + lastUpdateTime: "2024-01-28T10:30:30Z" + path: bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + size: 38099 + url: http://source-controller..svc.cluster.local./bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A Bucket enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-bucket) while fetching storage objects, +it can be [ready](#ready-bucket), or it can [fail during +reconciliation](#failed-bucket). + +The Bucket API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the Bucket to become +`Ready`. 
+ +#### Reconciling Bucket + +The source-controller marks a Bucket as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the Bucket, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the Bucket is newer than the [Observed Generation](#observed-generation). +- The newly calculated Artifact revision differs from the current Artifact. + +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the Bucket while their status value is `"True"`. + +#### Ready Bucket + +The source-controller marks a Bucket as _ready_ when it has the following +characteristics: + +- The Bucket reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The Bucket was able to communicate with the Bucket's object storage endpoint + using the current spec. +- The revision of the reported Artifact is up-to-date with the latest + calculated revision of the object storage bucket. + +When the Bucket is "ready", the controller sets a Condition with the following +attributes in the Bucket's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the Bucket +is marked as [reconciling](#reconciling-bucket), or e.g. a +[transient error](#failed-bucket) occurs due to a temporary network issue. 
+ +When the Bucket Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +Bucket's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed Bucket + +The source-controller may get stuck trying to produce an Artifact for a Bucket +without completing. This can occur due to some of the following factors: + +- The object storage [Endpoint](#endpoint) is temporarily unavailable. +- The specified object storage bucket does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The Bucket spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the Bucket's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: BucketOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the Bucket while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the Bucket has this Condition, the controller will continue to attempt +to produce an Artifact for the resource with an exponential backoff, until +it succeeds and the Bucket is marked as [ready](#ready-bucket). + +Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at +the same time, for example due to a newly introduced configuration issue in the +Bucket spec. 
When a reconciliation fails, the `Reconciling` Condition reason +would be `ProgressingWithRetry`. When the reconciliation is performed again +after the failure, the reason is updated to `Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the Bucket's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-bucket), or stalled due to an error +it cannot recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Generation + +The source-controller reports an +[observed generation][typical-status-properties] +in the Bucket's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket), +or stalled due to an error it cannot recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/externalartifacts.md b/docs/spec/v1/externalartifacts.md new file mode 100644 index 000000000..1eccbe0e0 --- /dev/null +++ b/docs/spec/v1/externalartifacts.md @@ -0,0 +1,114 @@ +# External Artifacts + + + +The `ExternalArtifact` is a generic API designed for interoperability with Flux. 
+It allows 3rd party controllers to produce and store [Artifact](#artifact) objects +in the same way as Flux's own source-controller. +For more details on the design and motivation behind this API, +see [RFC-0012](https://github.com/fluxcd/flux2/tree/main/rfcs/0012-external-artifact). + +## Example + +The following is an example of an ExternalArtifact produced by a 3rd party +source controller: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: ExternalArtifact +metadata: + name: my-artifact + namespace: flux-system +spec: + sourceRef: + apiVersion: example.com/v1 + kind: Source + name: my-source +status: + artifact: + digest: sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + lastUpdateTime: "2025-08-21T13:37:31Z" + path: source/flux-system/my-source/35d47c9d.tar.gz + revision: v1.0.0@sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + size: 20914 + url: http://example-controller.flux-system.svc.cluster.local./source/flux-system/my-source/35d47c9d.tar.gz + conditions: + - lastTransitionTime: "2025-08-21T13:37:31Z" + message: stored artifact for revision v1.0.0 + observedGeneration: 1 + reason: Succeeded + status: "True" + type: Ready +``` + +## ExternalArtifact spec + +### Source reference + +The `spec.sourceRef` field is optional and contains a reference +to the custom resource that the ExternalArtifact is based on. + +The `spec.sourceRef` contains the following fields: + +- `apiVersion`: the API version of the custom resource. +- `kind`: the kind of the custom resource. +- `name`: the name of the custom resource. +- `namespace`: the namespace of the custom resource. If omitted, it defaults to the + namespace of the ExternalArtifact. + +## ExternalArtifact status + +### Artifact + +The ExternalArtifact reports the latest synchronized state +as an Artifact object in the `.status.artifact`. 
+ +The `.status.artifact` contains the following fields: + +- `digest`: The checksum of the tar.gz file in the format `:`. +- `lastUpdateTime`: Timestamp of the last artifact update. +- `path`: Relative file path of the artifact in storage. +- `revision`: Human-readable identifier with version and checksum in the format `@:`. +- `size`: Number of bytes in the tar.gz file. +- `url`: In-cluster HTTP address for artifact retrieval. + +### Conditions + +The ExternalArtifact reports its status using Kubernetes standard conditions. + +#### Ready ExternalArtifact + +When the 3rd party controller has successfully produced and stored an +Artifact in storage, it sets a Condition with the following +attributes in the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful storage of the artifact and the associated revision. + +If the 3rd party controller performs a signature verification +of the artifact, and the verification is successful, a Condition with the +following attributes is added to the ExternalArtifact's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful verification of the artifact and the associated verification method. + +#### Failed ExternalArtifact + +If the 3rd party controller fails to produce and store an Artifact, +it sets the `Ready` Condition status to `False`, and adds a Condition with +the following attributes to the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "False"` +- `reason: FetchFailed` | `reason: StorageOperationFailed` | `reason: VerificationFailed` + +The `message` field should contain a human-readable message indicating +the reason for the failure. 
diff --git a/docs/spec/v1/gitrepositories.md b/docs/spec/v1/gitrepositories.md new file mode 100644 index 000000000..d39ee73d3 --- /dev/null +++ b/docs/spec/v1/gitrepositories.md @@ -0,0 +1,1259 @@ +# Git Repositories + + + +The `GitRepository` API defines a Source to produce an Artifact for a Git +repository revision. + +## Example + +The following is an example of a GitRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from a Git repository for the +resolved reference. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://github.com/stefanprodan/podinfo + ref: + branch: master +``` + +In the above example: + +- A GitRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the Git repository every five minutes, indicated + by the `.spec.interval` field. +- It clones the `master` branch of the `https://github.com/stefanprodan/podinfo` + repository, indicated by the `.spec.ref.branch` and `.spec.url` fields. +- The specified branch and resolved HEAD revision are used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current GitRepository revision differs from the latest fetched + revision, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `gitrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f gitrepository.yaml + ``` + +2. Run `kubectl get gitrepository` to see the GitRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + ``` + +3. 
Run `kubectl describe gitrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the GitRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2 + Last Update Time: 2022-02-14T11:23:36Z + Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Revision: master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc + Size: 91318 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Conditions: + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' + ``` + +## Writing a GitRepository spec + +As with all other Kubernetes config, a GitRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a GitRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A GitRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### URL + +`.spec.url` is a required field that specifies the HTTP/S or SSH address of the +Git repository. 
+ +**Note:** Unlike using `git`, the +[shorter scp-like syntax](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol) +is not supported for SSH addresses (e.g. `user@example.com:repository.git`). +Instead, the valid URL format is `ssh://user@example.com:22/repository.git`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the GitRepository, containing authentication +credentials for the Git repository. + +The required fields in the Secret depend on the specified protocol in the +[URL](#url). + +#### Basic access authentication + +To authenticate towards a Git repository over HTTPS using basic access +authentication (in other words: using a username and password), the referenced +Secret is expected to contain `.data.username` and `.data.password` values. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: basic-access-auth +type: Opaque +data: + username: + password: +``` + +#### Bearer token authentication + +To authenticate towards a Git repository over HTTPS using bearer token +authentication (in other words: using a `Authorization: Bearer` header), the referenced +Secret is expected to contain the token in `.data.bearerToken`. + +**Note:** If you are looking to use OAuth tokens with popular servers (e.g. +[GitHub](https://docs.github.com/en/rest/overview/authenticating-to-the-rest-api?apiVersion=2022-11-28#authenticating-with-a-token-generated-by-an-app), +[Bitbucket](https://support.atlassian.com/bitbucket-cloud/docs/using-access-tokens/), +[GitLab](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#clone-using-a-token)), +you should use basic access authentication instead. These servers use basic HTTP +authentication, with the OAuth token as the password. Check the documentation of +your Git server for details. 
+ +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: bearer-token-auth +type: Opaque +data: + bearerToken: +``` + +#### HTTPS Certificate Authority + +To provide a Certificate Authority to trust while connecting with a Git +repository over HTTPS, the referenced Secret's `.data` can contain a `ca.crt` +or `caFile` key. `ca.crt` takes precedence over `caFile`, i.e. if both keys +are present, the value of `ca.crt` will be taken into consideration. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-ca-credentials + namespace: default +type: Opaque +data: + ca.crt: +``` + +#### HTTPS Mutual TLS authentication + +To authenticate towards a Git repository over HTTPS using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used + for TLS client authentication. These must be used in conjunction, i.e. + specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is + required if the server is using a self-signed certificate. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-tls-certs + namespace: default +type: Opaque +data: + tls.crt: + tls.key: + ca.crt: +``` + +#### SSH authentication + +To authenticate towards a Git repository over SSH, the referenced Secret is +expected to contain `identity` and `known_hosts` fields. With the respective +private key of the SSH key pair, and the host keys of the Git repository. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssh-credentials +type: Opaque +stringData: + identity: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- + known_hosts: | + github.com ecdsa-sha2-nistp256 AAAA... 
+``` + +Alternatively, the Flux CLI can be used to automatically create the +secret, and also populate the known_hosts: + +```sh +flux create secret git podinfo-auth \ + --url=ssh://git@github.com/stefanprodan/podinfo \ + --private-key-file=./identity +``` + +For password-protected SSH private keys, the password must be provided +via an additional `password` field in the secret. Flux CLI also supports +this via the `--password` flag. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider +used for authentication purposes. + +Supported options are: + +- `generic` +- `azure` +- `github` + +When provider is not specified, it defaults to `generic` indicating that +mechanisms using `spec.secretRef` are used for authentication. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Azure + +The `azure` provider can be used to authenticate to Azure DevOps repositories +automatically using Workload Identity. + +##### Pre-requisites + +- Ensure that your Azure DevOps Organization is + [connected](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/connect-organization-to-azure-ad?view=azure-devops) + to Microsoft Entra. +- Ensure Workload Identity is properly [set up on your + cluster](https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster#create-an-aks-cluster). + +##### Configure Flux controller + +- Create a managed identity to access Azure DevOps. Establish a federated + identity credential between the managed identity and the source-controller + service account. In the default installation, the source-controller service + account is located in the `flux-system` namespace with name + `source-controller`. Ensure the federated credential uses the correct + namespace and name of the source-controller service account. 
For more details, + please refer to this + [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +- Add the managed identity to the Azure DevOps organization as a user. Ensure + that the managed identity has the necessary permissions to access the Azure + DevOps repository as described + [here](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/service-principal-managed-identity?view=azure-devops#2-add-and-manage-service-principals-in-an-azure-devops-organization). + +- Add the following patch to your bootstrap repository in + `flux-system/kustomization.yaml` file: + + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +**Note:** When azure `provider` is used with `GitRepository`, the `.spec.url` +must follow this format: + +``` +https://dev.azure.com/{your-organization}/{your-project}/_git/{your-repository} +``` +#### GitHub + +The `github` provider can be used to authenticate to Git repositories using +[GitHub Apps](https://docs.github.com/en/apps/overview). 
+
+##### Pre-requisites
+
+- [Register](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app)
+  the GitHub App with the necessary permissions and [generate a private
+  key](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/managing-private-keys-for-github-apps)
+  for the app.
+
+- [Install](https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app)
+  the app in the organization/account configuring access to the necessary
+  repositories.
+
+##### Configure GitHub App secret
+
+The GitHub App information is specified in `.spec.secretRef` in the format
+specified below:
+
+- Get the App ID from the app settings page at
+  `https://github.com/settings/apps/<app-slug>`.
+- Get the App Installation ID from the app installations page at
+`https://github.com/settings/installations`. Click the installed app, the URL
+will contain the installation ID
+`https://github.com/settings/installations/<installation-id>`. For
+organizations, the first part of the URL may be different, but it follows the
+same pattern.
+- The private key that was generated in the pre-requisites.
+- (Optional) GitHub Enterprise Server users can set the base URL to
+  `http(s)://HOSTNAME/api/v3`.
+- (Optional) If GitHub Enterprise Server uses a private CA, include its bundle (root and any intermediates) in `ca.crt`.
+  If the `ca.crt` is specified, then it will be used for TLS verification for all API / Git over `HTTPS` requests to the GitHub Enterprise Server.
+
+**NOTE:** If the secret contains `tls.crt`, `tls.key` then [mutual TLS configuration](#https-mutual-tls-authentication) will be automatically enabled.
+Omit these keys if the GitHub server does not support mutual TLS.
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: github-sa +type: Opaque +stringData: + githubAppID: "" + githubAppInstallationID: "" + githubAppPrivateKey: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- + githubAppBaseURL: "" #optional, required only for GitHub Enterprise Server users + ca.crt: | #optional, for GitHub Enterprise Server users + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +Alternatively, the Flux CLI can be used to automatically create the secret with +the github app authentication information. + +```sh +flux create secret githubapp ghapp-secret \ + --app-id=1 \ + --app-installation-id=3 \ + --app-private-key=~/private-key.pem +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as GitRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `azure`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. For Azure DevOps specific setup, see the + [Azure DevOps integration guide](https://fluxcd.io/flux/integrations/azure/#for-azure-devops). + +**Note:** that for a publicly accessible git repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Git repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. 
+ +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple GitRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for Git operations +like cloning. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the Git reference to resolve and +watch for changes. References are specified in one or more subfields +(`.branch`, `.tag`, `.semver`, `.name`, `.commit`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to a `master` +branch reference. + +#### Branch example + +To Git checkout a specified branch, use `.spec.ref.branch`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: +``` + +This will perform a shallow clone to only fetch the specified branch. + +#### Tag example + +To Git checkout a specified tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + tag: +``` + +This field takes precedence over [`.branch`](#branch-example). 
+
+#### SemVer example
+
+To Git checkout a tag based on a
+[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints),
+use `.spec.ref.semver`:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+  name: <repository>
+spec:
+  ref:
+    # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints
+    semver: "<semver range>"
+```
+
+This field takes precedence over [`.branch`](#branch-example) and
+[`.tag`](#tag-example).
+
+
+#### Name example
+
+To Git checkout a specified [reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References),
+use `.spec.ref.name`:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+  name: <repository>
+spec:
+  ref:
+    # Ref name format reference: https://git-scm.com/docs/git-check-ref-format#_description
+    name: <reference>
+```
+
+Valid examples are: `refs/heads/main`, `refs/tags/v0.1.0`, `refs/pull/420/head`,
+`refs/merge-requests/1/head`.
+
+This field takes precedence over [`.branch`](#branch-example),
+[`.tag`](#tag-example), and [`.semver`](#semver-example).
+
+**Note:** Azure DevOps and AWS CodeCommit do not support fetching the HEAD of
+a pull request. While Azure DevOps allows you to fetch the merge commit that
+will be created after merging a PR (using `refs/pull/<id>/merge`), this field
+can only be used to fetch references that exist in the current state of the Git
+repository and not references that will be created in the future.
+
+#### Commit example
+
+To Git checkout a specified commit, use `.spec.ref.commit`:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+  name: <repository>
+spec:
+  ref:
+    commit: "<commit-id>"
+```
+
+This field takes precedence over all other fields.
It can be combined with +`.spec.ref.branch` to perform a shallow clone of the branch, in which the +commit must exist: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: + commit: "" +``` + +### Verification + +`.spec.verify` is an optional field to enable the verification of Git commit +signatures. The field offers two subfields: + +- `.mode`, to specify what Git object(s) should be verified. Supported + values are: + - `HEAD`: Verifies the commit object pointed to by the HEAD of the repository + after performing a checkout via `.spec.ref`. + - `head`: Same as `HEAD`, supported for backwards compatibility purposes. + - `Tag`: Verifies the tag object pointed to by the specified/inferred tag + reference in `.spec.ref.tag`, `.spec.ref.semver` or `.spec.ref.name`. + - `TagAndHEAD`: Verifies the tag object pointed to by the specified/inferred tag + reference in `.spec.ref.tag`, `.spec.ref.semver` or `.spec.ref.name` and + the commit object pointed to by the tag. + +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the GitRepository. Containing the (PGP) public keys of trusted Git authors. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 1m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + verify: + mode: HEAD + secretRef: + name: pgp-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "True"` +- `reason: Succeeded` + +#### Verification Secret example + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgp-public-keys + namespace: default +type: Opaque +data: + author1.asc: + author2.asc: +``` + +Exporting armored public keys (`.asc` files) using `gpg`, and generating a +Secret: + +```sh +# Export armored public keys +gpg --export --armor 3CB12BA185C47B67 > author1.asc +gpg --export --armor 6A7436E8790F8689 > author2.asc +# Generate secret +kubectl create secret generic pgp-public-keys \ + --from-file=author1.asc \ + --from-file=author2.asc \ + -o yaml +``` + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Sparse checkout + +`.spec.sparseCheckout` is an optional field to specify list of directories to +checkout when cloning the repository. If specified, only the specified directory +contents will be present in the artifact produced for this repository. 
+ +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + sparseCheckout: + - charts + - kustomize +``` + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +GitRepository. When set to `true`, the controller will stop reconciling the +GitRepository, and changes to the resource or in the Git repository will not +result in a new Artifact. When the field is set to `false` or removed, it will +resume. + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all remote Git operations related to the GitRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +The proxy server must be either HTTP/S or SOCKS5. You can use a SOCKS5 proxy +with a HTTP/S Git repository url. + +Examples: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssh-proxy +type: Opaque +stringData: + address: socks5://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. 
+ +### Recurse submodules + +`.spec.recurseSubmodules` is an optional field to enable the initialization of +all submodules within the cloned Git repository, using their default settings. +This option defaults to `false`. + +Note that for most Git providers (e.g. GitHub and GitLab), deploy keys can not +be used as reusing a key across multiple repositories is not allowed. You have +to use either [HTTPS token-based authentication](#basic-access-authentication), +or an SSH key belonging to a (bot) user who has access to the main repository +and all submodules. + +### Include + +`.spec.include` is an optional field to map the contents of GitRepository +Artifacts into another. This may look identical to Git submodules but has +multiple benefits over regular submodules: + +- Including a `GitRepository` allows you to use different authentication + methods for different repositories. +- A change in the included repository will trigger an update of the including + repository. +- Multiple `GitRepository` objects could include the same repository, which + decreases the amount of cloning done compared to using submodules. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: include-example +spec: + include: + - repository: + name: other-repository + fromPath: deploy/kubernetes + toPath: base/app +``` + +The `.fromPath` and `.toPath` fields allow you to limit the files included, and +where they will be copied to. If you do not specify a value for `.fromPath`, +all files from the referenced GitRepository Artifact will be included. The +`.toPath` defaults to the `.repository.name` (e.g. `./other-repository/*`). + +## Working with GitRepositories + +### Excluding files + +By default, files which match the [default exclusion rules](#default-exclusions) +are excluded while archiving the Git repository contents as an Artifact. 
It is +possible to overwrite and/or overrule the default exclusions using a file in +the Git repository and/or an in-spec set of rules. + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the Git +repository. The `.sourceignore` file follows [the `.gitignore` pattern +format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +The controller recursively loads ignore files so a `.sourceignore` can be +placed in the repository root or in subdirectories. + +#### Ignore spec + +Another option is to define the exclusions within the GitRepository spec, using +the [`.spec.ignore` field](#ignore). Specified rules override the [default +exclusion list](#default-exclusions), and may overrule `.sourceignore` file +exclusions. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a GitRepository outside the +[specified interval window](#interval), a GitRepository can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the GitRepository for reconciliation if the `` differs +from the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). 
+ +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite gitrepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source git +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the GitRepository to reach +a [ready state](#ready-gitrepository) using `kubectl`: + +```sh +kubectl wait gitrepository/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a GitRepository, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend a GitRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source git +``` + +**Note:** When a GitRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +GitRepository's Status until it is resumed. + +#### Resume a GitRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. 
+ +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source git +``` + +### Debugging a GitRepository + +There are several ways to gather information about a GitRepository for +debugging purposes. + +#### Describe the GitRepository + +Describing a GitRepository using +`kubectl describe gitrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: False + Type: Ready + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning GitOperationFailed 2s (x9 over 4s) source-controller failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +#### Trace emitted Events + +To view events for specific GitRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running + +```sh +kubectl events --for GitRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact gitrepository/ stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' +36s Normal ArtifactUpToDate gitrepository/ artifact up-to-date with remote revision: 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' +94s Warning GitOperationFailed gitrepository/ failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific GitRepository, e.g. +`flux logs --level=error --kind=GitRepository --name=`. + +## GitRepository Status + +### Artifact + +The GitRepository reports the latest synchronized state from the Git repository +as an Artifact object in the `.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and +can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
+ +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: +status: + artifact: + digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b + lastUpdateTime: "2022-01-29T06:59:23Z" + path: gitrepository///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a + size: 91318 + url: http://source-controller..svc.cluster.local./gitrepository///363a6a8fe6a7f13e05d34c163b0ef02a777da20a.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A GitRepository enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-gitrepository) while fetching the Git +state, it can be [ready](#ready-gitrepository), or it can [fail during +reconciliation](#failed-gitrepository). + +The GitRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the GitRepository to +become `Ready`. + +#### Reconciling GitRepository + +The source-controller marks a GitRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the GitRepository, or the reported Artifact + is determined to have disappeared from the storage. 
+- The generation of the GitRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact revision differs from the current Artifact. + +When the GitRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the GitRepository while their status value is `"True"`. + +#### Ready GitRepository + +The source-controller marks a GitRepository as _ready_ when it has the +following characteristics: + +- The GitRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote Git repository using + the current spec. +- The revision of the reported Artifact is up-to-date with the latest + resolved revision of the remote Git repository. + +When the GitRepository is "ready", the controller sets a Condition with the +following attributes in the GitRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +GitRepository is marked as [reconciling](#reconciling-gitrepository), or e.g. a +[transient error](#failed-gitrepository) occurs due to a temporary network issue. 
+ +When the GitRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +GitRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed GitRepository + +The source-controller may get stuck trying to produce an Artifact for a +GitRepository without completing. This can occur due to some of the following +factors: + +- The remote Git repository [URL](#url) is temporarily unavailable. +- The Git repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- A specified Include is unavailable. +- The verification of the Git commit signature failed. +- The credentials in the referenced Secret are invalid. +- The GitRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: GitOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the GitRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the +[verification of a Git commit signature](#verification) fails. 
A condition with +the following attributes is added to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "False"` +- `reason: Failed` + +While the GitRepository has one or more of these Conditions, the controller +will continue to attempt to produce an Artifact for the resource with an +exponential backoff, until it succeeds and the GitRepository is marked as +[ready](#ready-gitrepository). + +Note that a GitRepository can be [reconciling](#reconciling-gitrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the GitRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the GitRepository's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-gitrepository), or stalled due to error +it can not recover from without human intervention. +The value is the same as the [ignore in spec](#ignore). +It indicates the ignore rules used in building the current artifact in storage. +It is also used by the controller to determine if an artifact needs to be +rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + cue + pkg + ... +``` + +### Observed Recurse Submodules + +The source-controller reports an observed recurse submodule in the +GitRepository's `.status.observedRecurseSubmodules`. The observed recurse +submodules is the latest `.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[recurse submodules in spec](#recurse-submodules). 
It indicates the recurse
+submodules configuration used in building the current artifact in storage. It is
+also used by the controller to determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedRecurseSubmodules: true
+  ...
+```
+
+### Observed Include
+
+The source-controller reports observed include in the GitRepository's
+`.status.observedInclude`. The observed include is the latest
+`.spec.include` value which resulted in a
+[ready state](#ready-gitrepository), or stalled due to error it can not recover
+from without human intervention. The value is the same as the
+[include in spec](#include). It indicates the include configuration used in
+building the current artifact in storage. It is also used by the controller to
+determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedInclude:
+  - fromPath: deploy/webapp
+    repository:
+      name: repo1
+    toPath: foo
+  - fromPath: deploy/secure
+    repository:
+      name: repo2
+    toPath: bar
+  ...
+```
+
+### Observed Sparse Checkout
+
+The source-controller reports observed sparse checkout in the GitRepository's
+`.status.observedSparseCheckout`. The observed sparse checkout is the latest
+`.spec.sparseCheckout` value which resulted in a [ready
+state](#ready-gitrepository), or stalled due to error it can not recover from
+without human intervention. The value is the same as the [sparseCheckout in
+spec](#sparse-checkout). It indicates the sparse checkout configuration used in
+building the current artifact in storage. It is also used by the controller to
+determine if an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedSparseCheckout:
+  - charts
+  - kustomize
+  ...
+```
+
+### Source Verification Mode
+
+The source-controller reports the Git object(s) it verified in the Git
+repository to create an artifact in the GitRepository's
+`.status.sourceVerificationMode`.
This value is the same as the [verification +mode in spec](#verification). The verification status is applicable only to the +latest Git repository revision used to successfully build and store an +artifact. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the GitRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-gitrepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/helmcharts.md b/docs/spec/v1/helmcharts.md new file mode 100644 index 000000000..eae4d5b9c --- /dev/null +++ b/docs/spec/v1/helmcharts.md @@ -0,0 +1,865 @@ +# Helm Charts + + + +The `HelmChart` API defines a Source to produce an Artifact for a Helm chart +archive with a set of specific configurations. + +## Example + +The following is an example of a HelmChart. 
It fetches and/or packages a Helm +chart and exposes it as a tarball (`.tgz`) Artifact for the specified +configuration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: '5.*' +``` + +In the above example: + +- A HelmChart named `podinfo` is created, indicated by the `.metadata.name` + field. +- The source-controller fetches the Helm chart every five minutes from the + `podinfo` HelmRepository source reference, indicated by the + `.spec.sourceRef.kind` and `.spec.sourceRef.name` fields. +- The fetched Helm chart version is the latest available chart + version in the range specified in `spec.version`. This version is also used as + Artifact revision, reported in-cluster in the `.status.artifact.revision` + field. +- When the current Helm Chart version differs from the latest available chart + in the version range, it is fetched and/or packaged as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmchart.yaml`. + +**Note:** HelmChart is usually used by the helm-controller. Based on the +HelmRelease configuration, an associated HelmChart is created by the +helm-controller. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmchart.yaml + ``` + +2. Run `kubectl get helmchart` to see the HelmChart: + + ```console + NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS + podinfo podinfo 5.* HelmRepository podinfo 53s True pulled 'podinfo' chart with version '5.2.1' + ``` + +3. 
Run `kubectl describe helmchart podinfo` to see the [Artifact](#artifact) and + [Conditions](#conditions) in the HelmChart's Status: + + ```console + Status: + Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Artifact: + Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 + Last Update Time: 2022-02-13T11:24:10Z + Path: helmchart/default/podinfo/podinfo-5.2.1.tgz + Revision: 5.2.1 + Size: 14166 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/podinfo-5.2.1.tgz + Conditions: + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: Ready + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: ArtifactInStorage + Observed Chart Name: podinfo + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ChartPullSucceeded 2m51s source-controller pulled 'podinfo' chart with version '5.2.1' + ``` + +## Writing a HelmChart spec + +As with all other Kubernetes config, a HelmChart needs `apiVersion`, `kind`, and +`metadata` fields. The name of a HelmChart object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmChart also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Source reference + +`.spec.sourceRef` is a required field that specifies a reference to the Source +the chart is available at. 
+
+Supported references are:
+- [`HelmRepository`](helmrepositories.md)
+- [`GitRepository`](gitrepositories.md)
+- [`Bucket`](buckets.md)
+
+Although there are three kinds of source references, there are only two
+underlying implementations. The artifact building process for `GitRepository`
+and `Bucket` are the same as they are already built source artifacts. In case
+of `HelmRepository`, a chart is fetched and/or packaged based on the
+configuration of the Helm chart.
+
+For a `HelmChart` to be reconciled, the associated artifact in the source
+reference must be ready. If the source artifact is not ready, the `HelmChart`
+reconciliation is retried.
+
+When the `metadata.generation` of the `HelmChart` doesn't match the
+`status.observedGeneration`, the chart is fetched from source and/or packaged.
+If there's no `.spec.valuesFiles` specified, the chart is only fetched from the
+source, and not packaged. If `.spec.valuesFiles` are specified, the chart is
+fetched and packaged with the values files. When the `metadata.generation`
+matches the `status.observedGeneration`, the chart is only fetched from source
+or from the cache if available, and not packaged.
+
+When using a `HelmRepository` source reference, the secret reference defined in
+the Helm repository is used to fetch the chart.
+
+The HelmChart reconciliation behavior varies depending on the source reference
+kind, see [reconcile strategy](#reconcile-strategy).
+
+The attributes of the generated artifact also vary depending on the source
+reference kind, see [artifact](#artifact).
+
+### Chart
+
+`.spec.chart` is a required field that specifies the name or path the Helm chart
+is available at in the [Source reference](#source-reference).
+
+For `HelmRepository` Source reference, it'll be just the name of the chart.
+
+```yaml
+spec:
+  chart: podinfo
+  sourceRef:
+    name: podinfo
+    kind: HelmRepository
+```
+
+For `GitRepository` and `Bucket` Source reference, it'll be the path to the
+Helm chart directory.
+
+```yaml
+spec:
+  chart: ./charts/podinfo
+  sourceRef:
+    name: podinfo
+    kind: <GitRepository|Bucket>
+```
+
+### Version
+
+`.spec.version` is an optional field to specify the version of the chart in
+semver. It is applicable only when the Source reference is a `HelmRepository`.
+It is ignored for `GitRepository` and `Bucket` Source reference. It defaults to
+the latest version of the chart with value `*`.
+
+Version can be a fixed semver, minor or patch semver range of a specific
+version (e.g. `4.0.x`) or any semver range (e.g. `>=4.0.0 <5.0.0`).
+
+### Values files
+
+`.spec.valuesFiles` is an optional field to specify an alternative list of
+values files to use as the chart values (values.yaml). The file paths are
+expected to be relative to the Source reference. Values files are merged in the
+order of the list with the last file overriding the first. It is ignored when
+omitted. When values files are specified, the chart is fetched and packaged
+with the provided values.
+
+```yaml
+spec:
+  chart:
+    spec:
+      chart: podinfo
+      ...
+      valuesFiles:
+        - values.yaml
+        - values-production.yaml
+```
+
+Values files also affect the generated artifact revision, see
+[artifact](#artifact).
+
+### Ignore missing values files
+
+`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing
+values files should be ignored rather than be considered errors. It defaults to
+`false`.
+
+When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified,
+the `.status.observedValuesFiles` field is populated with the list of values
+files that were found and actually contributed to the packaged chart.
+
+### Reconcile strategy
+
+`.spec.reconcileStrategy` is an optional field to specify what enables the
+creation of a new Artifact. Valid values are `ChartVersion` and `Revision`.
+`ChartVersion` is used for creating a new artifact when the chart version +changes in a `HelmRepository`. `Revision` is used for creating a new artifact +when the source revision changes in a `GitRepository` or a `Bucket` Source. It +defaults to `ChartVersion`. + +**Note:** If the reconcile strategy is `ChartVersion` and the source reference +is a `GitRepository` or a `Bucket`, no new chart artifact is produced on updates +to the source unless the `version` in `Chart.yaml` is incremented. To produce +new chart artifact on change in source revision, set the reconcile strategy to +`Revision`. + +Reconcile strategy also affects the artifact version, see [artifact](#artifact) +for more details. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Helm Chart source must be checked for updates. + +After successfully reconciling a HelmChart object, the source-controller +requeues the object for inspection after the specified interval. The value must +be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the source for updates every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmChart objects are set +up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmChart. When set to `true`, the controller will stop reconciling the +HelmChart, and changes to the resource or the Helm chart Source will not result +in a new Artifact. When the field is set to `false` or removed, it will resume. 
+ +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +### Verification + +**Note:** This feature is available only for Helm charts fetched from an OCI Registry. + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes +secret with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify a HelmChart's signature. +This allows for older HelmCharts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available HelmCharts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying HelmCharts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + interval: 5m + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: ">=6.1.6" + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo +spec: + interval: 1m0s + url: oci://ghcr.io/stefanprodan/charts + type: "oci" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+  name: podinfo
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the HelmChart's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+## Working with HelmCharts
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmChart outside the
+[specified interval window](#interval), a HelmChart can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite helmchart/<chart-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the HelmChart to reach a
+[ready state](#ready-helmchart) using `kubectl`:
+
+```sh
+kubectl wait helmchart/<chart-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a HelmChart, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+
+#### Suspend a HelmChart
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+  name: <chart-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmchart <chart-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": true}}'
+```
+
+**Note:** When a HelmChart has an Artifact and is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+HelmChart's Status until it is resumed.
+
+#### Resume a HelmChart
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+  name: <chart-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmchart <chart-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": false}}'
+```
+
+### Debugging a HelmChart
+
+There are several ways to gather information about a HelmChart for debugging
+purposes.
+ +#### Describe the HelmChart + +Describing a HelmChart using `kubectl describe helmchart ` displays +the latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: Stalled + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: False + Type: Ready + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: FetchFailed + Last Handled Reconcile At: 1644759954 + Observed Chart Name: podinfo + Observed Generation: 3 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning InvalidChartReference 11s source-controller invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with ver +sion matching '9.*' found +``` + +#### Trace emitted Events + +To view events for specific HelmChart(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example,
+running
+
+```sh
+kubectl events --for HelmChart/<chart-name>
+```
+
+lists
+
+```console
+LAST SEEN   TYPE      REASON                  OBJECT                   MESSAGE
+22s         Warning   InvalidChartReference   helmchart/<chart-name>   invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found
+2s          Normal    ChartPullSucceeded      helmchart/<chart-name>   pulled 'podinfo' chart with version '6.0.3'
+2s          Normal    ArtifactUpToDate        helmchart/<chart-name>   artifact up-to-date with remote revision: '6.0.3'
+```
+
+Besides being reported in Events, the reconciliation errors are also logged by
+the controller. The Flux CLI offers commands for filtering the logs for a
+specific HelmChart, e.g. `flux logs --level=error --kind=HelmChart --name=<chart-name>`.
+
+### Improving resource consumption by enabling the cache
+
+When using a `HelmRepository` as Source for a `HelmChart`, the controller loads
+the repository index in memory to find the latest version of the chart.
+
+The controller can be configured to cache Helm repository indexes in memory.
+The cache is used to avoid loading repository indexes for every `HelmChart`
+reconciliation.
+
+The following flags are provided to enable and configure the cache:
+- `helm-cache-max-size`: The maximum size of the cache in number of indexes.
+  If `0`, then the cache is disabled.
+- `helm-cache-ttl`: The TTL of an index in the cache.
+- `helm-cache-purge-interval`: The interval at which the cache is purged of
+  expired items.
+
+The caching strategy is to pull a repository index from the cache if it is
+available, otherwise to load the index, retrieve and build the chart,
+then cache the index. The cached index TTL is refreshed every time the
+Helm repository index is loaded with the `helm-cache-ttl` value.
+
+The cache is purged of expired items every `helm-cache-purge-interval`.
+
+When the cache is full, no more items can be added to the cache, and the
+source-controller will report a warning event instead.
+ +In order to use the cache, set the related flags in the source-controller +Deployment config: + +```yaml + spec: + containers: + - args: + - --watch-all-namespaces + - --log-level=info + - --log-encoding=json + - --enable-leader-election + - --storage-path=/data + - --storage-adv-addr=source-controller.$(RUNTIME_NAMESPACE).svc.cluster.local. + ## Helm cache with up to 10 items, i.e. 10 indexes. + - --helm-cache-max-size=10 + ## TTL of an index is 1 hour. + - --helm-cache-ttl=1h + ## Purge expired index every 10 minutes. + - --helm-cache-purge-interval=10m +``` + +## HelmChart Status + +### Artifact + +The HelmChart reports the last built chart as an Artifact object in the +`.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`-.tgz`), +and can be retrieved in-cluster from the `.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e + lastUpdateTime: "2022-02-10T18:53:47Z" + path: helmchart///-.tgz + revision: 6.0.3 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-.tgz +``` + +When using a `HelmRepository` as the source reference and values files are +provided, the value of `status.artifact.revision` is the chart version combined +with the `HelmChart` object generation. For example, if the chart version is +`6.0.3` and the `HelmChart` object generation is `1`, the +`status.artifact.revision` value will be `6.0.3+1`. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+1.tgz + revision: 6.0.3+1 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+1.tgz + observedGeneration: 1 + ... +``` + +When using a `GitRepository` or a `Bucket` as the source reference and +`Revision` as the reconcile strategy, the value of `status.artifact.revision` is +the chart version combined with the first 12 characters of the revision of the +`GitRepository` or `Bucket`. For example if the chart version is `6.0.3` and the +revision of the `Bucket` is `4e5cbb7b97d00a8039b8810b90b922f4256fd3bd8f78b934b4892dae13f7ca87`, +the `status.artifact.revision` value will be `6.0.3+4e5cbb7b97d0`. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+4e5cbb7b97d0.tgz + revision: 6.0.3+4e5cbb7b97d0 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+4e5cbb7b97d0.tgz +``` + +### Conditions + +A HelmChart enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmchart) while fetching or building the +chart, it can be [ready](#ready-helmchart), it can +[fail during reconciliation](#failed-helmchart), or it can +[stall](#stalled-helmchart). + +The HelmChart API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmChart to become +`Ready`. 
+ +#### Reconciling HelmChart + +The source-controller marks a HelmChart as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the HelmChart, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the HelmChart is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmChart is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmChart's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new version, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewChart` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmChart while their status value is `"True"`. + +#### Ready HelmChart + +The source-controller marks a HelmChart as _ready_ when it has the following +characteristics: + +- The HelmChart reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch and build the Helm chart using the current + spec. +- The version/revision of the reported Artifact is up-to-date with the + latest version/revision of the Helm chart. + +When the HelmChart is "ready", the controller sets a Condition with the +following attributes in the HelmChart's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmChart is marked as [reconciling](#reconciling-helmchart), or e.g. +a [transient error](#failed-helmchart) occurs due to a temporary network issue. 
+ +When the HelmChart Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmChart's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmChart + +The source-controller may get stuck trying to produce an Artifact for a +HelmChart without completing. This can occur due to some of the following +factors: + +- The Helm chart Source is temporarily unavailable. +- The credentials in the [Source reference](#source-reference) Secret are + invalid. +- The HelmChart spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmChart's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: StorageOperationFailed` | `reason: URLInvalid` | `reason: IllegalPath` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmChart while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmChart has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmChart is marked as [ready](#ready-helmchart). + +Note that a HelmChart can be [reconciling](#reconciling-helmchart) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmChart spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. 
When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmChart + +The source-controller can mark a HelmChart as _stalled_ when it determines that +without changes to the spec, the reconciliation can not succeed. +For example because a HelmChart Version is set to a non-existing version. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmchart), but adds another Condition with the following +attributes to the HelmChart's `.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: InvalidChartReference` + +While the HelmChart has this Condition, the controller will not requeue the +resource any further, and will stop reconciling the resource until a change to +the spec is made. + +### Observed Source Artifact Revision + +The source-controller reports the revision of the last +[Source reference's](#source-reference) Artifact the current chart was fetched +from in the HelmChart's `.status.observedSourceArtifactRevision`. It is used to +keep track of the source artifact revision and detect when a new source +artifact is available. + +### Observed Chart Name + +The source-controller reports the last resolved chart name of the Artifact +for the [`.spec.chart` field](#chart) in the HelmChart's +`.status.observedChartName`. It is used to keep track of the chart and detect +when a new chart is found. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmChart's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-helmchart), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. 
+ +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/helmrepositories.md b/docs/spec/v1/helmrepositories.md new file mode 100644 index 000000000..97fdff2ec --- /dev/null +++ b/docs/spec/v1/helmrepositories.md @@ -0,0 +1,881 @@ +# Helm Repositories + + + +There are 2 [Helm repository types](#type) defined by the `HelmRepository` API: +- Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm +repository index YAML (`index.yaml`). +- OCI Helm repository, which defines a source that does not produce an Artifact. + It's a data container to store the information about the OCI repository that + can be used by [HelmChart](helmcharts.md) to access OCI Helm charts. + +## Examples + +### Helm HTTP/S repository + +The following is an example of a HelmRepository. It creates a YAML (`.yaml`) +Artifact from the fetched Helm repository index (in this example the [podinfo +repository](https://github.com/stefanprodan/podinfo)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://stefanprodan.github.io/podinfo +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller fetches the Helm repository index YAML every five + minutes from `https://stefanprodan.github.io/podinfo`, indicated by the + `.spec.interval` and `.spec.url` fields. 
+- The digest (algorithm defaults to SHA256) of the Helm repository index after + stable sorting the entries is used as Artifact revision, reported in-cluster + in the `.status.artifact.revision` field. +- When the current HelmRepository revision differs from the latest fetched + revision, it is stored as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + ``` + +3. Run `kubectl describe helmrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the HelmRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Last Update Time: 2022-02-04T09:55:58Z + Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Size: 40898 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Conditions: + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 1m source-controller fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' + ``` + +### Helm OCI repository + +The following is an example of an OCI HelmRepository. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + type: "oci" + interval: 5m0s + url: oci://ghcr.io/stefanprodan/charts +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- A HelmChart that refers to this HelmRepository uses the URL in the `.spec.url` + field to access the OCI Helm chart. 
+ +**NOTE:** The `.spec.interval` field is only used by the `default` Helm +repository and is ignored for any value in `oci` Helm repository. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/charts 3m22s + ``` + +Because the OCI Helm repository is a data container, there's nothing to report +for `READY` and `STATUS` columns above. The existence of the object can be +considered to be ready for use. + +## Writing a HelmRepository spec + +As with all other Kubernetes config, a HelmRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a HelmRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Type + +`.spec.type` is an optional field that specifies the Helm repository type. + +Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. + +**Note:**: For improved support for OCI Helm charts, please use the +[`OCIRepository`](ocirepositories.md) API. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used +for authentication purposes. + +Supported options are: +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when static credentials +are used for authentication. If you do not specify `.spec.provider`, it defaults +to `generic`. + +**Note**: The provider field is supported only for Helm OCI repositories. The `spec.type` +field must be set to `oci`. 
+ +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS worker +node IAM role or IAM Role for Service Accounts (IRSA), and by extension gain access +to ECR. + +##### EKS Worker Node IAM Role + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. + +##### IAM Role for Service Accounts (IRSA) + +When using IRSA to enable access to ECR, add the following patch to your bootstrap +repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running on +it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Azure Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes or +Workload Identity, and by extension gain access to GCR or Artifact Registry. + +##### Access Scopes + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and Artifact Registry, +source-controller running on it will also have access to them. 
+ +##### GKE Workload Identity + +When using Workload Identity to enable access to GCR or Artifact Registry, add the +following patch to your bootstrap repository, in the `flux-system/kustomization.yaml` +file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using Google Container Registry service, +the needed permission is instead `storage.objects.list` which can be bound as part +of the Container Registry Service Agent role. Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure non-TLS connections when fetching Helm chart OCI artifacts. + +**Note**: The insecure field is supported only for Helm OCI repositories. +The `spec.type` field must be set to `oci`. + +### Interval + +**Note:** This field is ineffectual for [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.interval` is a an optional field that specifies the interval which the +Helm repository index must be consulted at. When not set, the default value is +`1m`. + +After successfully reconciling a HelmRepository object, the source-controller +requeues the object for inspection after the specified interval. The value +must be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. 
`10m0s` to fetch the HelmRepository index YAML every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmRepository objects +are set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### URL + +`.spec.url` is a required field that depending on the [type of the HelmRepository object](#type) +specifies the HTTP/S or OCI address of a Helm repository. + +For OCI, the URL is expected to point to a registry repository, e.g. `oci://ghcr.io/fluxcd/source-controller`. + +For Helm repositories which require authentication, see [Secret reference](#secret-reference). + +### Timeout + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.timeout` is an optional field to specify a timeout for the fetch +operation. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. When not set, the +default value is `1m`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the HelmRepository, containing authentication +credentials for the repository. + +#### Basic access authentication + +To authenticate towards a Helm repository using basic access authentication +(in other words: using a username and password), the referenced Secret is +expected to contain `.data.username` and `.data.password` values. 
+ +For example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + secretRef: + name: example-user +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-user + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +OCI Helm repository example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/my-user/my-private-repo + type: "oci" + secretRef: + name: oci-creds +--- +apiVersion: v1 +kind: Secret +metadata: + name: oci-creds + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +For OCI Helm repositories, Kubernetes secrets of type [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) are also supported. +It is possible to create one such secret with `kubectl create secret docker-registry` +or using the Flux CLI: + +```yaml +flux create secret oci ghcr-auth \ + --url=ghcr.io \ + --username=flux \ + --password=${GITHUB_PAT} +``` + +**Warning:** Support for specifying TLS authentication data using this API has been +deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead. +If the controller uses the secret specified by this field to configure TLS, then +a deprecation warning will be logged. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a Helm repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. 
+specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Pass credentials + +`.spec.passCredentials` is an optional field to allow the credentials from the +[Secret reference](#secret-reference) to be passed on to a host that does not +match the host as defined in URL. This may for example be required if the host +advertised chart URLs in the index differ from the specified URL. + +Enabling this should be done with caution, as it can potentially result in +credentials getting stolen in a man-in-the-middle attack. This feature only applies +to HTTP/S Helm repositories. + +### Suspend + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmRepository. 
When set to `true`, the controller will stop reconciling the +HelmRepository, and changes to the resource or the Helm repository index will +not result in a new Artifact. When the field is set to `false` or removed, it +will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with HelmRepositories + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), being a data container, once created, they +are ready to used by [HelmCharts](helmcharts.md). + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a HelmRepository outside the +[specified interval window](#interval), a HelmRepository can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the object for reconciliation if the `` differs from +the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite helmrepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source helm +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the HelmRepository to +reach a [ready state](#ready-helmrepository) using `kubectl`: + +```sh +kubectl wait helmrepository/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a HelmRepository, you can suspend it using the +[`.spec.suspend` field](#suspend). 
+ +#### Suspend a HelmRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source helm +``` + +**Note:** When a HelmRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +HelmRepository's Status until it is resumed. + +#### Resume a HelmRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source helm +``` + +### Debugging a HelmRepository + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), being a data container, they are static +objects that don't require debugging if valid. + +There are several ways to gather information about a HelmRepository for debugging +purposes. + +#### Describe the HelmRepository + +Describing a HelmRepository using `kubectl describe helmrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: Stalled + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: False + Type: Ready + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: FetchFailed + Observed Generation: 2 + URL: http://source-controller.source-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning Failed 6s source-controller failed to construct Helm client: scheme "invalid" not supported +``` + +#### Trace emitted Events + +To view events for specific HelmRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for HelmRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +107s Warning Failed helmrepository/ failed to construct Helm client: scheme "invalid" not supported +7s Normal NewArtifact helmrepository/ fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' +3s Normal ArtifactUpToDate helmrepository/ artifact up-to-date with remote revision: 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific HelmRepository, e.g. `flux logs --level=error --kind=HelmRepository --name=`. 
+ +## HelmRepository Status + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), they do not contain any information in the +status. + +### Artifact + +The HelmRepository reports the last fetched repository index as an Artifact +object in the `.status.artifact` of the resource. + +The Artifact file is an exact copy of the Helm repository index YAML +(`index-.yaml`) as fetched, and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +status: + artifact: + digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + lastUpdateTime: "2022-02-04T09:55:58Z" + path: helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + size: 40898 + url: http://source-controller.flux-system.svc.cluster.local./helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml +``` + +### Conditions + +A HelmRepository enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmrepository) while fetching the +repository index, it can be [ready](#ready-helmrepository), it can +[fail during reconciliation](#failed-helmrepository), or it can +[stall](#stalled-helmrepository). + +The HelmRepository API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmRepository to become +`Ready`. 
+ +#### Reconciling HelmRepository + +The source-controller marks a HelmRepository as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the HelmRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the HelmRepository is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmRepository while their status value is `"True"`. + +#### Ready HelmRepository + +The source-controller marks a HelmRepository as _ready_ when it has the following +characteristics: + +- The HelmRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch the Helm repository index using the current + spec. +- The revision of the reported Artifact is up-to-date with the latest + revision of the Helm repository. + +When the HelmRepository is "ready", the controller sets a Condition with the following +attributes in the HelmRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmRepository is marked as [reconciling](#reconciling-helmrepository), or e.g. 
+a [transient error](#failed-helmrepository) occurs due to a temporary network +issue. + +When the HelmRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmRepository + +The source-controller may get stuck trying to produce an Artifact for a +HelmRepository without completing. This can occur due to some of the following +factors: + +- The Helm repository [URL](#url) is temporarily unavailable. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The HelmRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: IndexationFailed` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmRepository has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmRepository is marked as [ready](#ready-helmrepository). 
+ +Note that a HelmRepository can be [reconciling](#reconciling-helmrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmRepository + +The source-controller can mark a HelmRepository as _stalled_ when it determines +that without changes to the spec, the reconciliation can not succeed. +For example because a Helm repository URL with an unsupported protocol is +specified. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmrepository), but adds another Condition with the following +attributes to the HelmRepository's +`.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: URLInvalid` + +While the HelmRepository has this Condition, the controller will not requeue +the resource any further, and will stop reconciling the resource until a change +to the spec is made. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-helmrepository), +or stalled due to error it can not recover from without human intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). 
+ +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/ocirepositories.md b/docs/spec/v1/ocirepositories.md new file mode 100644 index 000000000..d2bfa399e --- /dev/null +++ b/docs/spec/v1/ocirepositories.md @@ -0,0 +1,1147 @@ +# OCI Repositories + + + +The `OCIRepository` API defines a Source to produce an Artifact for an OCI +repository. + +## Example + +The following is an example of an OCIRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from an OCI repository for the +resolved digest. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: latest +``` + +In the above example: + +- An OCIRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the OCI repository every five minutes, indicated + by the `.spec.interval` field. +- It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` + repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. +- The resolved tag and SHA256 digest is used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current OCIRepository digest differs from the latest fetched + digest, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `ocirepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f ocirepository.yaml + ``` + +2. 
Run `kubectl get ocirepository` to see the OCIRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + ``` + +3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the OCIRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Last Update Time: 2025-06-14T11:23:36Z + Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 + URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Conditions: + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + ``` + +## Writing an OCIRepository spec + +As with all other Kubernetes config, an OCIRepository needs `apiVersion`, 
+`kind`, and `metadata` fields. The name of an OCIRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +An OCIRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### URL + +`.spec.url` is a required field that specifies the address of the +container image repository in the format `oci://://`. + +**Note:** that specifying a tag or digest is not acceptable for this field. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used for +authentication purposes. + +Supported options are: + +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when +static credentials are used for authentication, either with +`spec.secretRef` or `spec.serviceAccountName`. +If you do not specify `.spec.provider`, it defaults to `generic`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS +worker node IAM role or IAM Role for Service Accounts (IRSA), and by extension +gain access to ECR. + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. 
+ +When using IRSA to enable access to ECR, add the following patch to your +bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running +on it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes +or Workload Identity, and by extension gain access to GCR or Artifact Registry. + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and +Artifact Registry, source-controller running on it will also have access to them. 
+ +When using Workload Identity to enable access to GCR or Artifact Registry, add +the following patch to your bootstrap repository, in the +`flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using +Google Container Registry service, the needed permission is instead `storage.objects.list` +which can be bound as part of the Container Registry Service Agent role. +Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the OCIRepository, containing authentication +credentials for the OCI repository. + +This secret is expected to be in the same format as [`imagePullSecrets`][image-pull-secrets]. +The usual way to create such a secret is with: + +```sh +kubectl create secret docker-registry ... +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as OCIRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. 
In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. + +**Note:** that for a publicly accessible image repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards an OCI repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. 
+ +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +OCI repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for OCI operations +like pulling. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the OCI reference to resolve and +watch for changes. 
References are specified in one or more subfields +(`.tag`, `.semver`, `.digest`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to the `latest` +tag. + +#### Tag example + +To pull a specific tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + tag: "" +``` + +#### SemVer example + +To pull a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.tag`](#tag-example). + +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. 
+ +#### Digest example + +To pull a specific digest, use `.spec.ref.digest`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + digest: "sha256:" +``` + +This field takes precedence over all other fields. + +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. + +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. + +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. 
+ +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. 
+ +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. + +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. 
+
+#### Notation
+
+The `notation` provider can be used to verify the signature of an OCI artifact using known
+trust policy and CA certificate.
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+### Suspend
+
+`.spec.suspend` is an optional field to suspend the reconciliation of an
+OCIRepository. When set to `true`, the controller will stop reconciling the
+OCIRepository, and changes to the resource or in the OCI repository will not
+result in a new Artifact. When the field is set to `false` or removed, it will
+resume.
+
+## Working with OCIRepositories
+
+### Excluding files
+
+By default, files which match the [default exclusion rules](#default-exclusions)
+are excluded while archiving the OCI repository contents as an Artifact. 
+It is possible to overwrite and/or overrule the default exclusions using
+the [`.spec.ignore` field](#ignore).
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  ignore: |
+    # exclude all
+    /*
+    # include deploy dir
+    !/deploy
+    # exclude file extensions from deploy dir
+    /deploy/**/*.md
+    /deploy/**/*.txt
+```
+
+#### `.sourceignore` file
+
+Excluding files is possible by adding a `.sourceignore` file in the artifact.
+The `.sourceignore` file follows [the `.gitignore` pattern
+format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern
+entries may overrule [default exclusions](#default-exclusions).
+
+The controller recursively loads ignore files so a `.sourceignore` can be
+placed in the artifact root or in subdirectories.
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile an OCIRepository outside the
+[specified interval window](#interval), an OCIRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the OCIRepository for reconciliation if the `<arbitrary-value>` differs
+from the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite ocirepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source oci <repository-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the OCIRepository to reach
+a [ready state](#ready-ocirepository) using `kubectl`:
+
+```sh
+kubectl wait ocirepository/<repository-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of an OCIRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend). 
+
+#### Suspend an OCIRepository
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}'
+```
+
+Using `flux`:
+
+```sh
+flux suspend source oci <repository-name>
+```
+
+**Note:** When an OCIRepository has an Artifact and it is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+OCIRepository's Status until it is resumed.
+
+#### Resume an OCIRepository
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: <repository-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}'
+```
+
+Using `flux`:
+
+```sh
+flux resume source oci <repository-name>
+```
+
+### Debugging an OCIRepository
+
+There are several ways to gather information about an OCIRepository for
+debugging purposes.
+
+#### Describe the OCIRepository
+
+Describing an OCIRepository using
+`kubectl describe ocirepository <repository-name>`
+displays the latest recorded information for the resource in the `Status` and
+`Events` sections:
+
+```console
+...
+Status:
+... 
+ Conditions: + Last Transition Time: 2025-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: False + Type: Ready + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning OCIOperationFailed 2s (x9 over 4s) source-controller failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +#### Trace emitted Events + +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. For +example, running + +```sh +kubectl events --for OCIRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. 
The Flux CLI offers commands for filtering the logs for a
+specific OCIRepository, e.g.
+`flux logs --level=error --kind=OCIRepository --name=<repository-name>`.
+
+## OCIRepository Status
+
+### Artifact
+
+The OCIRepository reports the latest synchronized state from the OCI repository
+as an Artifact object in the `.status.artifact` of the resource.
+
+The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact.
+
+The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the
+[OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md).
+If the OCI artifact was created with `flux push artifact`, then the `metadata` will contain the following
+annotations:
+- `org.opencontainers.image.created` the date and time on which the artifact was built
+- `org.opencontainers.image.source` the URL of the Git repository containing the source files
+- `org.opencontainers.image.revision` the Git branch and commit SHA1 of the source files
+
+The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and
+can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
+ +#### Artifact example + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +status: + artifact: + digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 + lastUpdateTime: "2025-08-08T09:35:45Z" + metadata: + org.opencontainers.image.created: "2025-08-08T12:31:41+03:00" + org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872 + org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git + path: ocirepository///.tar.gz + revision: @ + size: 1105 + url: http://source-controller..svc.cluster.local./ocirepository///.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +OCIRepository has various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-ocirepository) while fetching the remote +state, it can be [ready](#ready-ocirepository), or it can [fail during +reconciliation](#failed-ocirepository). + +The OCIRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the OCIRepository to +become `Ready`. 
+ +#### Reconciling OCIRepository + +The source-controller marks an OCIRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the OCIRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the OCIRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact digest differs from the current Artifact. + +When the OCIRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the OCIRepository while their status value is `"True"`. + +#### Ready OCIRepository + +The source-controller marks an OCIRepository as _ready_ when it has the +following characteristics: + +- The OCIRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote OCI repository using + the current spec. +- The digest of the reported Artifact is up-to-date with the latest + resolved digest of the remote OCI repository. + +When the OCIRepository is "ready", the controller sets a Condition with the +following attributes in the OCIRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. 
a +[transient error](#failed-ocirepository) occurs due to a temporary network issue. + +When the OCIRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +OCIRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed OCIRepository + +The source-controller may get stuck trying to produce an Artifact for a +OCIRepository without completing. This can occur due to some of the following +factors: + +- The remote OCI repository [URL](#url) is temporarily unavailable. +- The OCI repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The OCIRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the OCIRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: OCIArtifactPullFailed` | `reason: OCIArtifactLayerOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the OCIRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the signature +[verification](#verification) fails. 
A condition with
+the following attributes is added to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "False"`
+- `reason: VerificationError`
+
+While the OCIRepository has one or more of these Conditions, the controller
+will continue to attempt to produce an Artifact for the resource with an
+exponential backoff, until it succeeds and the OCIRepository is marked as
+[ready](#ready-ocirepository).
+
+Note that an OCIRepository can be [reconciling](#reconciling-ocirepository)
+while failing at the same time, for example due to a newly introduced
+configuration issue in the OCIRepository spec. When a reconciliation fails, the
+`Reconciling` Condition reason would be `ProgressingWithRetry`. When the
+reconciliation is performed again after the failure, the reason is updated to
+`Progressing`.
+
+### Observed Ignore
+
+The source-controller reports an observed ignore in the OCIRepository's
+`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
+which resulted in a [ready state](#ready-ocirepository), or stalled due to an error
+it cannot recover from without human intervention. The value is the same as the
+[ignore in spec](#ignore). It indicates the ignore rules used in building the
+current artifact in storage. It is also used by the controller to determine if
+an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedIgnore: |
+    hpa.yaml
+    build
+  ...
+```
+
+### Observed Layer Selector
+
+The source-controller reports an observed layer selector in the OCIRepository's
+`.status.observedLayerSelector`. The observed layer selector is the latest
+`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository),
+or stalled due to an error it cannot recover from without human intervention.
+The value is the same as the [layer selector in spec](#layer-selector).
+It indicates the layer selection configuration used in building the current
+artifact in storage. 
It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip + operation: copy + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the OCIRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +[image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/docs/spec/v1alpha1/buckets.md b/docs/spec/v1alpha1/buckets.md index 7addeccd4..bb2c07a96 100644 --- a/docs/spec/v1alpha1/buckets.md +++ b/docs/spec/v1alpha1/buckets.md @@ -231,4 +231,4 @@ Wait for ready condition: ```bash kubectl -n gitios-system wait bucket/podinfo --for=condition=ready --timeout=1m -``` +``` \ No newline at end of file diff --git a/docs/spec/v1alpha1/helmrepositories.md b/docs/spec/v1alpha1/helmrepositories.md index 
966460dfd..e2d1bfc2f 100644 --- a/docs/spec/v1alpha1/helmrepositories.md +++ b/docs/spec/v1alpha1/helmrepositories.md @@ -19,7 +19,7 @@ type HelmRepositorySpec struct { // repository. // For HTTP/S basic auth the secret must contain username and // password fields. - // For TLS the secret must contain caFile, keyFile and caCert + // For TLS the secret must contain caFile, keyFile and caFile // fields. // +optional SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` diff --git a/docs/spec/v1beta1/buckets.md b/docs/spec/v1beta1/buckets.md index fc9f566c5..70b77ec48 100644 --- a/docs/spec/v1beta1/buckets.md +++ b/docs/spec/v1beta1/buckets.md @@ -11,7 +11,7 @@ Bucket: // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). - // +kubebuilder:validation:Enum=generic;aws + // +kubebuilder:validation:Enum=generic;aws;gcp // +optional Provider string `json:"provider,omitempty"` @@ -40,7 +40,7 @@ type BucketSpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout for download operations, defaults to 20s. + // The timeout for download operations, defaults to 60s. // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -62,6 +62,7 @@ Supported providers: const ( GenericBucketProvider string = "generic" AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" ) ``` @@ -114,9 +115,13 @@ in a gzip compressed TAR archive (`.tar.gz`). 
### Excluding files -Git files (`.git/`, `.gitignore`, `.gitmodules`, and `.gitattributes`) are -excluded from the archive by default, as well as some extensions (`.jpg, .jpeg, -.gif, .png, .wmv, .flv, .tar.gz, .zip`) +The following files and extensions are excluded from the archive by default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) Excluding additional files from the archive is possible by adding a `.sourceignore` file in the root of the bucket. The `.sourceignore` file @@ -178,7 +183,8 @@ data: secretkey: ``` -> **Note:** that for Google Cloud Storage you have to enable +> **Note:** that when using the generic provider +> for Google Cloud Storage you have to enable > S3 compatible access in your GCP project. ### AWS IAM authentication @@ -204,6 +210,104 @@ spec: > **Note:** that on EKS you have to create an IAM role for the source-controller > service account that grants access to the bucket. +### AWS IAM bucket policy example + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::podinfo/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::podinfo" + } + ] +} +``` + +### GCP Provider + +When the provider is `gcp` and the `secretRef` is not specified, +the GCP client authenticates using workload identity. +The GCP client automatically handles authentication in two ways. +The first way is that the GCP client library will automatically +check for the presence of the GOOGLE_APPLICATION_CREDENTIALS +environment variable. 
If this is not found, the GCP client library +will search for the Google Application Credential file in the config directory: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: Bucket +metadata: + name: podinfo + namespace: gitops-system +spec: + interval: 5m + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +When the provider is `gcp` and the `secretRef` is specified, +the GCP client authenticates using a Kubernetes secret named serviceaccount +which is a base 64 encoded string of the GCP service account JSON file: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: Bucket +metadata: + name: podinfo + namespace: gitops-system +spec: + interval: 5m + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: gitops-system +type: Opaque +data: + serviceaccount: 
"ewogICAgInR5cGUiOiAic2VydmljZV9hY2NvdW50IiwKICAgICJwcm9qZWN0X2lkIjogInBvZGluZm8iLAogICAgInByaXZhdGVfa2V5X2lkIjogIjI4cXdnaDNnZGY1aGozZ2I1ZmozZ3N1NXlmZ2gzNGY0NTMyNDU2OGh5MiIsCiAgICAicHJpdmF0ZV9rZXkiOiAiLS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tXG5Id2V0aGd5MTIzaHVnZ2hoaGJkY3U2MzU2ZGd5amhzdmd2R0ZESFlnY2RqYnZjZGhic3g2M2Ncbjc2dGd5Y2ZlaHVoVkdURllmdzZ0N3lkZ3lWZ3lkaGV5aHVnZ3ljdWhland5NnQzNWZ0aHl1aGVndmNldGZcblRGVUhHVHlnZ2h1Ymh4ZTY1eWd0NnRneWVkZ3kzMjZodWN5dnN1aGJoY3Zjc2poY3NqaGNzdmdkdEhGQ0dpXG5IY3llNnR5eWczZ2Z5dWhjaGNzYmh5Z2NpamRiaHl5VEY2NnR1aGNldnVoZGNiaHVoaHZmdGN1aGJoM3VoN3Q2eVxuZ2d2ZnRVSGJoNnQ1cmZ0aGh1R1ZSdGZqaGJmY3JkNXI2N3l1aHV2Z0ZUWWpndnRmeWdoYmZjZHJoeWpoYmZjdGZkZnlodmZnXG50Z3ZnZ3RmeWdodmZ0NnR1Z3ZURjVyNjZ0dWpoZ3ZmcnR5aGhnZmN0Nnk3eXRmcjVjdHZnaGJoaHZ0Z2hoanZjdHRmeWNmXG5mZnhmZ2hqYnZnY2d5dDY3dWpiZ3ZjdGZ5aFZDN3VodmdjeWp2aGhqdnl1amNcbmNnZ2hndmdjZmhnZzc2NTQ1NHRjZnRoaGdmdHloaHZ2eXZ2ZmZnZnJ5eXU3N3JlcmVkc3dmdGhoZ2ZjZnR5Y2ZkcnR0ZmhmL1xuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAgICJjbGllbnRfZW1haWwiOiAidGVzdEBwb2RpbmZvLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAgICJjbGllbnRfaWQiOiAiMzI2NTc2MzQ2Nzg3NjI1MzY3NDYiLAogICAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAgICJ0b2tlbl91cmkiOiAiaHR0cHM6Ly9vYXV0aDIuZ29vZ2xlYXBpcy5jb20vdG9rZW4iLAogICAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICAgImNsaWVudF94NTA5X2NlcnRfdXJsIjogImh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvdGVzdCU0MHBvZGluZm8uaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=" +``` + +> **Note:** the serviceaccount secret is a base 64 encoded form of +> the GCP service account json file like so + +```json + { + "type": "service_account", + "project_id": "podinfo", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@podinfo.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" + } +``` +> **Note:** that when using the gcp provider for +> Google Cloud Storage you do not have to enable +> S3 compatible access in your GCP project. + ## Status examples Successful download: diff --git a/docs/spec/v1beta1/gitrepositories.md b/docs/spec/v1beta1/gitrepositories.md index ef42aa763..93f0f33ca 100644 --- a/docs/spec/v1beta1/gitrepositories.md +++ b/docs/spec/v1beta1/gitrepositories.md @@ -19,8 +19,8 @@ type GitRepositorySpec struct { // The secret name containing the Git credentials. // For HTTPS repositories the secret must contain username and password // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For SSH repositories the secret must contain identity and known_hosts + // fields. 
// +optional SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` @@ -28,7 +28,7 @@ type GitRepositorySpec struct { // +required Interval metav1.Duration `json:"interval"` - // The timeout for remote Git operations like cloning, defaults to 20s. + // The timeout for remote Git operations like cloning, defaults to 60s. // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -57,6 +57,14 @@ type GitRepositorySpec struct { // +kubebuilder:default:=go-git // +optional GitImplementation string `json:"gitImplementation,omitempty"` + + // When enabled, after the clone is created, initializes all submodules within. + // This option is available only when using the 'go-git' GitImplementation. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Extra git repositories to map into the repository + Include []GitRepositoryInclude `json:"include,omitempty"` } ``` @@ -144,9 +152,13 @@ gzip compressed TAR archive (`.tar.gz`). ### Excluding files -Git files (`.git/`, `.gitignore`, `.gitmodules`, and `.gitattributes`) are -excluded from the archive by default, as well as some extensions (`.jpg, .jpeg, -.gif, .png, .wmv, .flv, .tar.gz, .zip`) +The following files and extensions are excluded from the archive by default: + +- Git files (`.git/ ,.gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) Excluding additional files from the archive is possible by adding a `.sourceignore` file in the root of the repository. The `.sourceignore` file @@ -185,7 +197,7 @@ comes with its own set of drawbacks. Some git providers like Azure DevOps require that the git client supports specific capabilities to be able to communicate. 
The initial library used in source-controller did not support -this functionality while other libraries that did were missinging other critical functionality, +this functionality while other libraries that did were missing other critical functionality, specifically the ability to do shallow cloning. Shallow cloning is important as it allows source-controller to only fetch the latest commits, instead of the whole git history. For some very large repositories this means downloading GB of data that could fill the disk @@ -194,10 +206,10 @@ and also impact the traffic costs. To be able to support Azure DevOps a compromise solution was built, giving the user the option to select the git library while accepting the drawbacks. -| Git Implementation | Shallow Clones | V2 Protocol Support | -|---|---|---| -| 'go-git' | true | false | -| 'libgit2' | false | true | +| Git Implementation | Shallow Clones | Git Submodules | V2 Protocol Support | +| --- | --- | --- | --- | +| 'go-git' | true | true | false | +| 'libgit2' | false | false | true | Pull the master branch from a repository in Azure DevOps. @@ -213,6 +225,21 @@ spec: gitImplementation: libgit2 ``` +## Git Proxy + +A Git proxy can be configured by setting the appropriate environment variables +for proxy configurations, for example `HTTPS_PROXY`, `NO_PROXY`, etc., in the +source-controller pod. There may be some limitations in the proxy support based +on the Git implementations. + +| Git Implementation | HTTP_PROXY | HTTPS_PROXY | NO_PROXY | Self-signed Certs | +| --- | --- | --- | --- | --- | +| 'go-git' | true | true | true | false | +| 'libgit2' | false | true | false | true | + +**NOTE:** libgit2 v1.2.0 supports `NO_PROXY`, but source-controller uses +libgit2 v1.1.1 at the moment. 
+ ## Spec examples ### Checkout strategies @@ -261,6 +288,21 @@ spec: commit: 363a6a8fe6a7f13e05d34c163b0ef02a777da20a ``` +Checkout a specific commit: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 1m + url: https://github.com/stefanprodan/podinfo + ref: + commit: 363a6a8fe6a7f13e05d34c163b0ef02a777da20a +``` + Pull a specific tag: ```yaml @@ -318,7 +360,36 @@ data: password: ``` -> **Note:** that self-signed certificates are not supported. +### HTTPS self-signed certificates + +Cloning over HTTPS from a Git repository with a self-signed certificate: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 1m + url: https://customdomain.com/stefanprodan/podinfo + secretRef: + name: https-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-credentials + namespace: default +type: Opaque +data: + username: + password: + caFile: +``` + +It is also possible to specify a `caFile` for public repositories, in that case the username and password +can be omitted. 
### SSH authentication @@ -363,6 +434,17 @@ kubectl create secret generic ssh-credentials \ --from-file=./known_hosts ``` +If your SSH key is protected with a passphrase, +you can specify it in the Kubernetes secret under the `password` key: + +```sh +kubectl create secret generic ssh-credentials \ + --from-file=./identity \ + --from-file=./identity.pub \ + --from-file=./known_hosts \ + --from-literal=password= +``` + ### GPG signature verification Verify the OpenPGP signature for the commit that master branch HEAD points to: @@ -405,6 +487,99 @@ kubectl create secret generic pgp-public-keys \ --from-file=author2.asc ``` +### Git submodules + +With `spec.recurseSubmodules` you can configure the controller to +clone a specific branch including its Git submodules: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: repo-with-submodules + namespace: default +spec: + interval: 1m + url: https://github.com// + secretRef: + name: https-credentials + ref: + branch: main + recurseSubmodules: true +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-credentials + namespace: default +type: Opaque +data: + username: + password: +``` + +Note that deploy keys can't be used to pull submodules from private repositories +as GitHub and GitLab doesn't allow a deploy key to be reused across repositories. +You have to use either HTTPS token-based authentication, or an SSH key belonging +to a user that has access to the main repository and all its submodules. + +### Including GitRepository + +With `spec.include` you can map the contents of a Git repository into another. +This may look identical to Git submodules but has multiple benefits over +regular submodules: + +* Including a `GitRepository` allows you to use different authentication methods for different repositories. +* A change in the included repository will trigger an update of the including repository. 
+* Multiple `GitRepositories` could include the same repository, which decreases the amount of cloning done compared to using submodules. + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: app-repo + namespace: default +spec: + interval: 1m + url: https://github.com//app-repo + secretRef: + name: https-credentials + ref: + branch: main +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: config-repo + namespace: default +spec: + interval: 1m + url: https://github.com//config-repo + secretRef: + name: https-credentials + ref: + branch: main + include: + - repository: + name: app-repo + fromPath: deploy/kubernetes + toPath: base/app +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-credentials + namespace: default +type: Opaque +data: + username: + password: +``` + +The `fromPath` and `toPath` parameters allows you to limit the files included and where they will be +copied to in the main repository. If you do not specify a value for `fromPath` all files in the +repository will be included. The `toPath` value will default to the name of the repository. + ## Status examples Successful sync: diff --git a/docs/spec/v1beta1/helmcharts.md b/docs/spec/v1beta1/helmcharts.md index 3a96c08fd..6c4461c2e 100644 --- a/docs/spec/v1beta1/helmcharts.md +++ b/docs/spec/v1beta1/helmcharts.md @@ -28,9 +28,28 @@ type HelmChartSpec struct { // +required Interval metav1.Duration `json:"interval"` - // Alternative values file to use as the default chart values, expected to be a - // relative path in the SourceRef. Ignored when omitted. + // Determines what enables the creation of a new artifact. Valid values are + // ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. 
+ // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // Alternative list of values files to use as the chart values (values.yaml + // is not included by default), expected to be a relative path in the SourceRef. + // Values files are merged in the order of this list with the last file overriding + // the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // Alternative values file to use as the default chart values, expected to + // be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, + // for backwards compatibility the file defined here is merged before the + // ValuesFiles items. Ignored when omitted. + // +optional + // +deprecated ValuesFile string `json:"valuesFile,omitempty"` // This flag tells the controller to suspend the reconciliation of this source. @@ -39,6 +58,18 @@ type HelmChartSpec struct { } ``` +### Reconciliation strategies + +```go +const ( + // ReconcileStrategyChartVersion creates a new chart artifact when the version of the Helm chart is different. + ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision creates a new chart artifact when the Revision of the SourceRef is different. 
+ ReconcileStrategyRevision string = "Revision" +) +``` + ### Reference types ```go @@ -182,6 +213,61 @@ spec: interval: 10m ``` +Override default values with alternative values files relative to the +path in the SourceRef: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmChart +metadata: + name: redis + namespace: default +spec: + chart: redis + version: 10.5.7 + sourceRef: + name: stable + kind: HelmRepository + interval: 5m + valuesFiles: + - values.yaml + - values-production.yaml +``` + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + chart: ./charts/podinfo + sourceRef: + name: podinfo + kind: GitRepository + interval: 10m + valuesFiles: + - ./charts/podinfo/values.yaml + - ./charts/podinfo/values-production.yaml +``` + +Reconcile with every change to the source revision: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + chart: ./charts/podinfo + sourceRef: + name: podinfo + kind: GitRepository + interval: 10m + reconcileStrategy: Revision +``` + ## Status examples Successful chart pull: diff --git a/docs/spec/v1beta1/helmrepositories.md b/docs/spec/v1beta1/helmrepositories.md index f82f6196e..c194a72a0 100644 --- a/docs/spec/v1beta1/helmrepositories.md +++ b/docs/spec/v1beta1/helmrepositories.md @@ -20,10 +20,19 @@ type HelmRepositorySpec struct { // For HTTP/S basic auth the secret must contain username and // password fields. // For TLS the secret must contain a certFile and keyFile, and/or - // caCert fields. - // +optional + // caFile fields. + // +optional SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + // PassCredentials allows the credentials from the SecretRef to be passed on to + // a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the index + // differ from the defined URL. 
+ // Enabling this should be done with caution, as it can potentially result in + // credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + // The interval at which to check the upstream for updates. // +required Interval metav1.Duration `json:"interval"` diff --git a/docs/spec/v1beta2/README.md b/docs/spec/v1beta2/README.md new file mode 100644 index 000000000..371015871 --- /dev/null +++ b/docs/spec/v1beta2/README.md @@ -0,0 +1,21 @@ +# source.toolkit.fluxcd.io/v1beta2 + +This is the v1beta2 API specification for defining the desired state sources of Kubernetes clusters. + +## Specification + +* Source kinds: + + [GitRepository](gitrepositories.md) + + [OCIRepository](ocirepositories.md) + + [HelmRepository](helmrepositories.md) + + [HelmChart](helmcharts.md) + + [Bucket](buckets.md) + +## Implementation + +* [source-controller](https://github.com/fluxcd/source-controller/) + +## Consumers + +* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/) +* [helm-controller](https://github.com/fluxcd/helm-controller/) diff --git a/docs/spec/v1beta2/buckets.md b/docs/spec/v1beta2/buckets.md new file mode 100644 index 000000000..a78516f88 --- /dev/null +++ b/docs/spec/v1beta2/buckets.md @@ -0,0 +1,1382 @@ +# Buckets + + + +The `Bucket` API defines a Source to produce an Artifact for objects from storage +solutions like Amazon S3, Google Cloud Storage buckets, or any other solution +with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. + +## Example + +The following is an example of a Bucket. It creates a tarball (`.tar.gz`) +Artifact with the fetched objects from an object storage with an S3 +compatible API (e.g. 
[Minio](https://min.io)):
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: Bucket
+metadata:
+  name: minio-bucket
+  namespace: default
+spec:
+  interval: 5m0s
+  endpoint: minio.example.com
+  insecure: true
+  secretRef:
+    name: minio-bucket-secret
+  bucketName: example
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: minio-bucket-secret
+  namespace: default
+type: Opaque
+stringData:
+  accesskey: 
+  secretkey: 
+```
+
+In the above example:
+
+- A Bucket named `minio-bucket` is created, indicated by the
+  `.metadata.name` field.
+- The source-controller checks the object storage bucket every five minutes,
+  indicated by the `.spec.interval` field.
+- It authenticates to the `minio.example.com` endpoint with
+  the static credentials from the `minio-bucket-secret` Secret data, indicated by
+  the `.spec.endpoint` and `.spec.secretRef.name` fields.
+- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag)
+  in the `.spec.bucketName` bucket is compiled, while filtering the keys using
+  [default ignore rules](#default-exclusions).
+- The digest (algorithm defaults to SHA256) of the list is used as Artifact
+  revision, reported in-cluster in the `.status.artifact.revision` field.
+- When the current Bucket revision differs from the latest calculated revision,
+  all objects are fetched and archived.
+- The new Artifact is reported in the `.status.artifact` field.
+
+You can run this example by saving the manifest into `bucket.yaml`, and
+changing the Bucket and Secret values to target a Minio instance you have
+control over.
+
+**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see
+[Provider](#provider).
+
+1. Apply the resource on the cluster:
+
+   ```sh
+   kubectl apply -f bucket.yaml
+   ```
+
+2. 
Run `kubectl get buckets` to see the Bucket: + + ```console + NAME ENDPOINT AGE READY STATUS + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ``` + +3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the Bucket's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Last Update Time: 2022-02-01T23:43:38Z + Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Conditions: + Last Transition Time: 2022-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket + ``` + +## Writing a Bucket spec + +As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and +`metadata` fields. 
The name of a Bucket object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A Bucket also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Provider + +The `.spec.provider` field allows for specifying a Provider to enable provider +specific configurations, for example to communicate with a non-S3 compatible +API endpoint, or to change the authentication method. + +Supported options are: + +- [Generic](#generic) +- [AWS](#aws) +- [Azure](#azure) +- [GCP](#gcp) + +If you do not specify `.spec.provider`, it defaults to `generic`. + +#### Generic + +When a Bucket's `spec.provider` is set to `generic`, the controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go), which can communicate +with any Amazon S3 compatible object storage (including +[GCS](https://cloud.google.com/storage/docs/interoperability), +[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-), +and many others). + +The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a +Secret with `.data.accesskey` and `.data.secretkey` values, used to +authenticate with static credentials. + +The Provider allows for specifying a region the bucket is in using the +[`.spec.region` field](#region), if required by the [Endpoint](#endpoint). 
+ +##### Generic example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: generic-insecure + namespace: default +spec: + provider: generic + interval: 5m0s + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + timeout: 60s + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### AWS + +When a Bucket's `.spec.provider` field is set to `aws`, the source-controller +will attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go). + +Without a [Secret reference](#secret-reference), authorization using +credentials retrieved from the AWS EC2 service is attempted by default. When +a reference is specified, it expects a Secret with `.data.accesskey` and +`.data.secretkey` values, used to authenticate with static credentials. + +The Provider allows for specifying the +[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) +using the [`.spec.region` field](#region). + +##### AWS EC2 example + +**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for +the source-controller service account that grants access to the bucket. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS IAM role example + +Replace `` with the specified `.spec.bucketName`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} +``` + +##### AWS static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + secretRef: + name: aws-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: aws-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### Azure + +When a Bucket's `.spec.provider` is set to `azure`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Azure Blob Storage SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob). + +Without a [Secret reference](#secret-reference), authentication using a chain +with: + +- [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.3.0-beta.4#WorkloadIdentityCredential) +- [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) + with the `AZURE_CLIENT_ID` +- Managed Identity with a system-assigned identity + +is attempted by default. If no chain can be established, the bucket +is assumed to be publicly reachable. + +When a reference is specified, it expects a Secret with one of the following +sets of `.data` fields: + +- `tenantId`, `clientId` and `clientSecret` for authenticating a Service + Principal with a secret. 
+- `tenantId`, `clientId` and `clientCertificate` (plus optionally + `clientCertificatePassword` and/or `clientCertificateSendChain`) for + authenticating a Service Principal with a certificate. +- `clientId` for authenticating using a Managed Identity. +- `accountKey` for authenticating using a + [Shared Key](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#SharedKeyCredential). +- `sasKey` for authenticating using a [SAS Token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) + +For any Managed Identity and/or Azure Active Directory authentication method, +the base URL can be configured using `.data.authorityHost`. If not supplied, +[`AzurePublicCloud` is assumed](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AuthorityHost). + +##### Azure example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-public + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: podinfo + endpoint: https://podinfoaccount.blob.core.windows.net + timeout: 30s +``` + +##### Azure Service Principal Secret example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-service-principal-secret + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientSecret: +``` + +##### Azure Service Principal Certificate example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-service-principal-cert + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientCertificate: + # Plus optionally + clientCertificatePassword: + clientCertificateSendChain: # either "1" or "true" +``` + +##### Azure Managed Identity with Client ID example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-managed-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-smi-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-smi-auth + namespace: default +type: Opaque +data: + clientId: +``` + +##### Azure Blob Shared Key example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-shared-key + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + accountKey: +``` + +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testsas + endpoint: https://testfluxsas.blob.core.windows.net +``` + +##### Deprecated: Managed Identity with AAD Pod Identity + +If you are using [aad pod identity](https://azure.github.io/aad-pod-identity/docs), +You need to create an Azure Identity and give it access to Azure Blob Storage. 
+
+```sh
+export IDENTITY_NAME="blob-access"
+
+az role assignment create --role "Storage Blob Data Reader" \
+--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \
+--scope "/subscriptions//resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/"
+
+export IDENTITY_CLIENT_ID="$(az identity show -n ${IDENTITY_NAME} -g ${RESOURCE_GROUP} -otsv --query clientId)"
+export IDENTITY_RESOURCE_ID="$(az identity show -n ${IDENTITY_NAME} -otsv --query id)"
+```
+
+Create an AzureIdentity object that references the identity created above:
+
+```yaml
+---
+apiVersion: aadpodidentity.k8s.io/v1
+kind: AzureIdentity
+metadata:
+  name: # source-controller label will match this name
+  namespace: flux-system
+spec:
+  clientID: 
+  resourceID: 
+  type: 0 # user-managed identity
+```
+
+Create an AzureIdentityBinding object that binds Pods with a specific selector
+with the AzureIdentity created:
+
+```yaml
+apiVersion: "aadpodidentity.k8s.io/v1"
+kind: AzureIdentityBinding
+metadata:
+  name: ${IDENTITY_NAME}-binding
+spec:
+  azureIdentity: ${IDENTITY_NAME}
+  selector: ${IDENTITY_NAME}
+```
+
+Label the source-controller Deployment correctly so that it can match an identity binding:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: source-controller
+  namespace: flux-system
+spec:
+  template:
+    metadata:
+      labels:
+        aadpodidbinding: ${IDENTITY_NAME} # match the AzureIdentity name
+```
+
+If you have set up aad-pod-identity correctly and labeled the source-controller
+Deployment, then you don't need to reference a Secret.
+ +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testsas + endpoint: https://testfluxsas.blob.core.windows.net +``` + +##### Azure Blob SAS Token example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: azure-sas-token + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + sasKey: +``` + +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. + +The minimum permissions for an account-level SAS token are: + +- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. + +#### GCP + +When a Bucket's `.spec.provider` is set to `gcp`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Google Client SDK](https://github.com/googleapis/google-api-go-client). 
+ +Without a [Secret reference](#secret-reference), authorization using a +workload identity is attempted by default. The workload identity is obtained +using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable, falling back +to the Google Application Credential file in the config directory. +When a reference is specified, it expects a Secret with a `.data.serviceaccount` +value with a GCP service account JSON file. + +The Provider allows for specifying the +[Bucket location](https://cloud.google.com/storage/docs/locations) using the +[`.spec.region` field](#region). + +##### GCP example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: gcp-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: gcp-secret + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: + endpoint: storage.googleapis.com + region: + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: default +type: Opaque +data: + serviceaccount: +``` + +Where the (base64 decoded) value of `.data.serviceaccount` looks like this: + +```json +{ + "type": "service_account", + "project_id": "example", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@example.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" +} +``` + +### Interval + +`.spec.interval` is a required field that specifies the interval which the +object storage bucket must be consulted at. + +After successfully reconciling a Bucket object, the source-controller requeues +the object for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the object storage bucket every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. the apply of a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). 
+ +### Endpoint + +`.spec.endpoint` is a required field that specifies the HTTP/S object storage +endpoint to connect to and fetch objects from. Connecting to an (insecure) +HTTP endpoint requires enabling [`.spec.insecure`](#insecure). + +Some endpoints require the specification of a [`.spec.region`](#region), +see [Provider](#provider) for more (provider specific) examples. + +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. By adding this field, one may specify the +STS endpoint from where temporary credentials will be fetched. + +This field is only supported for the `aws` and `generic` bucket [providers](#provider). + +If using `.spec.sts`, the following fields are required: + +- `.spec.sts.provider`, the Security Token Service provider. The only supported + option for the `generic` bucket provider is `ldap`. The only supported option + for the `aws` bucket provider is `aws`. +- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In + the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS + Endpoint, or an Interface Endpoint created inside a VPC. In the case of + `ldap` this must be the LDAP server endpoint. + +When using the `ldap` provider, the following fields may also be specified: + +- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP + credentials. The Secret must contain the following keys: + - `username`, the username to authenticate with. + - `password`, the password to authenticate with. +- `.spec.sts.certSecretRef.name`, the name of the Secret containing the + TLS configuration for communicating with the STS endpoint. The contents + of this Secret must follow the same structure of + [`.spec.certSecretRef.name`](#cert-secret-reference). 
+
+If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified,
+the proxy configuration will be used for communicating with the STS endpoint.
+
+Example for the `ldap` provider:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: Bucket
+metadata:
+  name: example
+  namespace: example
+spec:
+  interval: 5m
+  bucketName: example
+  provider: generic
+  endpoint: minio.example.com
+  sts:
+    provider: ldap
+    endpoint: https://ldap.example.com
+    secretRef:
+      name: ldap-credentials
+    certSecretRef:
+      name: ldap-tls
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ldap-credentials
+  namespace: example
+type: Opaque
+stringData:
+  username: <username>
+  password: <password>
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ldap-tls
+  namespace: example
+type: kubernetes.io/tls # or Opaque
+stringData:
+  tls.crt: <PEM-encoded certificate>
+  tls.key: <PEM-encoded private key>
+  ca.crt: <PEM-encoded CA certificate>
+```
+
+### Bucket name
+
+`.spec.bucketName` is a required field that specifies which object storage
+bucket on the [Endpoint](#endpoint) objects should be fetched from.
+
+See [Provider](#provider) for more (provider specific) examples.
+
+### Region
+
+`.spec.region` is an optional field to specify the region a
+[`.spec.bucketName`](#bucket-name) is located in.
+
+See [Provider](#provider) for more (provider specific) examples.
+
+### Cert secret reference
+
+`.spec.certSecretRef.name` is an optional field to specify a secret containing
+TLS certificate data. The secret can contain the following keys:
+
+* `tls.crt` and `tls.key`, to specify the client certificate and private key used
+for TLS client authentication. These must be used in conjunction, i.e.
+specifying one without the other will lead to an error.
+* `ca.crt`, to specify the CA certificate used to verify the server, which is
+required if the server is using a self-signed certificate.
+
+If the server is using a self-signed certificate and has TLS client
+authentication enabled, all three values are required.
+ +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +[endpoint](#endpoint), if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for object storage +fetch operations. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. +The default value is `60s`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the Bucket, containing authentication +credentials for the object storage. For some `.spec.provider` implementations +the presence of the field is required, see [Provider](#provider) for more +details and examples. + +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [provider](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage +objects which keys match the defined rules are excluded while fetching. 
+ +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket. +When set to `true`, the controller will stop reconciling the Bucket, and changes +to the resource or in the object storage bucket will not result in a new +Artifact. When the field is set to `false` or removed, it will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with Buckets + +### Excluding files + +By default, storage bucket objects which match the [default exclusion +rules](#default-exclusions) are excluded while fetching. It is possible to +overwrite and/or overrule the default exclusions using a file in the bucket +and/or an in-spec set of rules. + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the root of the +object storage bucket. The `.sourceignore` file follows [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +#### Ignore spec + +Another option is to define the exclusions within the Bucket spec, using the +[`.spec.ignore` field](#ignore). Specified rules override the +[default exclusion list](#default-exclusions), and may overrule `.sourceignore` +file exclusions. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: Bucket
+metadata:
+  name: <bucket-name>
+spec:
+  ignore: |
+    # exclude all
+    /*
+    # include deploy dir
+    !/deploy
+    # exclude file extensions from deploy dir
+    /deploy/**/*.md
+    /deploy/**/*.txt
+```
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a Bucket outside the
+[specified interval window](#interval), a Bucket can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the Bucket for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite bucket/<bucket-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source bucket <bucket-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the Bucket to reach a
+[ready state](#ready-bucket) using `kubectl`:
+
+```sh
+kubectl wait bucket/<bucket-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a Bucket, you can suspend it using the [`.spec.suspend`
+field](#suspend).
+
+#### Suspend a Bucket
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: Bucket
+metadata:
+  name: <bucket-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch bucket <bucket-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": true}}'
+```
+
+Using `flux`:
+
+```sh
+flux suspend source bucket <bucket-name>
+```
+
+**Note:** When a Bucket has an Artifact and is suspended, and this Artifact
+later disappears from the storage due to e.g. the source-controller Pod being
+evicted from a Node, this will not be reflected in the Bucket's Status until it
+is resumed.
+
+#### Resume a Bucket
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: Bucket
+metadata:
+  name: <bucket-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch bucket <bucket-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": false}}'
+```
+
+Using `flux`:
+
+```sh
+flux resume source bucket <bucket-name>
+```
+
+### Debugging a Bucket
+
+There are several ways to gather information about a Bucket for debugging
+purposes.
+
+#### Describe the Bucket
+
+Describing a Bucket using `kubectl describe bucket <bucket-name>` displays the
+latest recorded information for the resource in the `Status` and `Events`
+sections:
+
+```console
+...
+Status:
+...
+  Conditions:
+    Last Transition Time:  2022-02-02T13:26:55Z
+    Message:               processing object: new generation 1 -> 2
+    Observed Generation:   2
+    Reason:                ProgressingWithRetry
+    Status:                True
+    Type:                  Reconciling
+    Last Transition Time:  2022-02-02T13:26:55Z
+    Message:               bucket 'my-new-bucket' does not exist
+    Observed Generation:   2
+    Reason:                BucketOperationFailed
+    Status:                False
+    Type:                  Ready
+    Last Transition Time:  2022-02-02T13:26:55Z
+    Message:               bucket 'my-new-bucket' does not exist
+    Observed Generation:   2
+    Reason:                BucketOperationFailed
+    Status:                True
+    Type:                  FetchFailed
+  Observed Generation:     1
+  URL:                     http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
+Events:
+  Type     Reason                 Age                 From               Message
+  ----     ------                 ----                ----               -------
+  Warning  BucketOperationFailed  37s (x11 over 42s)  source-controller  bucket 'my-new-bucket' does not exist
+```
+
+#### Trace emitted Events
+
+To view events for specific Bucket(s), `kubectl events` can be used in
+combination with 
`--for` to list the Events for specific objects. For example,
+running
+
+```sh
+kubectl events --for Bucket/<bucket-name>
+```
+
+lists
+
+```console
+LAST SEEN   TYPE      REASON                 OBJECT               MESSAGE
+2m30s       Normal    NewArtifact            bucket/<bucket-name>   fetched 16 files with revision from 'my-new-bucket'
+36s         Normal    ArtifactUpToDate       bucket/<bucket-name>   artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+18s         Warning   BucketOperationFailed  bucket/<bucket-name>   bucket 'my-new-bucket' does not exist
+```
+
+Besides being reported in Events, the reconciliation errors are also logged by
+the controller. The Flux CLI offers commands for filtering the logs for a
+specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=<bucket-name>`.
+
+## Bucket Status
+
+### Artifact
+
+The Bucket reports the latest synchronized state from the object storage
+bucket as an Artifact object in the `.status.artifact` of the resource.
+
+The Artifact file is a gzip compressed TAR archive
+(`<commit sha>.tar.gz`), and can be retrieved in-cluster from the
+`.status.artifact.url` HTTP address.
+ +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: +status: + artifact: + digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a + lastUpdateTime: "2022-01-28T10:30:30Z" + path: bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + size: 38099 + url: http://source-controller..svc.cluster.local./bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A Bucket enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-bucket) while fetching storage objects, +it can be [ready](#ready-bucket), or it can [fail during +reconciliation](#failed-bucket). + +The Bucket API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the Bucket to become +`Ready`. + +#### Reconciling Bucket + +The source-controller marks a Bucket as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the Bucket, or the reported Artifact is + determined to have disappeared from the storage. 
+- The generation of the Bucket is newer than the [Observed Generation](#observed-generation). +- The newly calculated Artifact revision differs from the current Artifact. + +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the Bucket while their status value is `"True"`. + +#### Ready Bucket + +The source-controller marks a Bucket as _ready_ when it has the following +characteristics: + +- The Bucket reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The Bucket was able to communicate with the Bucket's object storage endpoint + using the current spec. +- The revision of the reported Artifact is up-to-date with the latest + calculated revision of the object storage bucket. + +When the Bucket is "ready", the controller sets a Condition with the following +attributes in the Bucket's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the Bucket +is marked as [reconciling](#reconciling-bucket), or e.g. a +[transient error](#failed-bucket) occurs due to a temporary network issue. 
+ +When the Bucket Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +Bucket's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed Bucket + +The source-controller may get stuck trying to produce an Artifact for a Bucket +without completing. This can occur due to some of the following factors: + +- The object storage [Endpoint](#endpoint) is temporarily unavailable. +- The specified object storage bucket does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The Bucket spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the Bucket's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: BucketOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the Bucket while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the Bucket has this Condition, the controller will continue to attempt +to produce an Artifact for the resource with an exponential backoff, until +it succeeds and the Bucket is marked as [ready](#ready-bucket). + +Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at +the same time, for example due to a newly introduced configuration issue in the +Bucket spec. 
When a reconciliation fails, the `Reconciling` Condition reason +would be `ProgressingWithRetry`. When the reconciliation is performed again +after the failure, the reason is updated to `Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the Bucket's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-bucket), or stalled due to error +it can not recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Generation + +The source-controller reports an +[observed generation][typical-status-properties] +in the Bucket's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1beta2/gitrepositories.md b/docs/spec/v1beta2/gitrepositories.md new file mode 100644 index 000000000..03ffbeb82 --- /dev/null +++ b/docs/spec/v1beta2/gitrepositories.md @@ -0,0 +1,981 @@ +# Git Repositories + + + +The `GitRepository` API defines a Source to produce an Artifact for a Git +repository revision. 
+ +## Example + +The following is an example of a GitRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from a Git repository for the +resolved reference. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://github.com/stefanprodan/podinfo + ref: + branch: master +``` + +In the above example: + +- A GitRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the Git repository every five minutes, indicated + by the `.spec.interval` field. +- It clones the `master` branch of the `https://github.com/stefanprodan/podinfo` + repository, indicated by the `.spec.ref.branch` and `.spec.url` fields. +- The specified branch and resolved HEAD revision are used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current GitRepository revision differs from the latest fetched + revision, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `gitrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f gitrepository.yaml + ``` + +2. Run `kubectl get gitrepository` to see the GitRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + ``` + +3. Run `kubectl describe gitrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the GitRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2 + Last Update Time: 2022-02-14T11:23:36Z + Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Revision: master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc + Size: 91318 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz + Conditions: + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-14T11:23:36Z + Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' + ``` + +## Writing a GitRepository spec + +As with all other Kubernetes config, a GitRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a GitRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A GitRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### URL + +`.spec.url` is a required field that specifies the HTTP/S or SSH address of the +Git repository. 
+ +**Note:** Unlike using `git`, the +[shorter scp-like syntax](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol) +is not supported for SSH addresses (e.g. `user@example.com:repository.git`). +Instead, the valid URL format is `ssh://user@example.com:22/repository.git`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the GitRepository, containing authentication +credentials for the Git repository. + +The required fields in the Secret depend on the specified protocol in the +[URL](#url). + +#### Basic access authentication + +To authenticate towards a Git repository over HTTPS using basic access +authentication (in other words: using a username and password), the referenced +Secret is expected to contain `.data.username` and `.data.password` values. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: basic-access-auth +type: Opaque +data: + username: + password: +``` + +#### Bearer token authentication + +To authenticate towards a Git repository over HTTPS using bearer token +authentication (in other words: using a `Authorization: Bearer` header), the referenced +Secret is expected to contain the token in `.data.bearerToken`. + +**Note:** If you are looking to use OAuth tokens with popular servers (e.g. +[GitHub](https://docs.github.com/en/rest/overview/authenticating-to-the-rest-api?apiVersion=2022-11-28#authenticating-with-a-token-generated-by-an-app), +[Bitbucket](https://support.atlassian.com/bitbucket-cloud/docs/using-access-tokens/), +[GitLab](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#clone-using-a-token)), +you should use basic access authentication instead. These servers use basic HTTP +authentication, with the OAuth token as the password. Check the documentation of +your Git server for details. 
+ +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: bearer-token-auth +type: Opaque +data: + bearerToken: +``` + +#### HTTPS Certificate Authority + +To provide a Certificate Authority to trust while connecting with a Git +repository over HTTPS, the referenced Secret can contain a `.data.caFile` +value. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-ca-credentials + namespace: default +type: Opaque +data: + caFile: +``` + +#### SSH authentication + +To authenticate towards a Git repository over SSH, the referenced Secret is +expected to contain `identity` and `known_hosts` fields. With the respective +private key of the SSH key pair, and the host keys of the Git repository. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssh-credentials +type: Opaque +stringData: + identity: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- + known_hosts: | + github.com ecdsa-sha2-nistp256 AAAA... +``` + +Alternatively, the Flux CLI can be used to automatically create the +secret, and also populate the known_hosts: + +```sh +flux create secret git podinfo-auth \ + --url=ssh://git@github.com/stefanprodan/podinfo \ + --private-key-file=./identity +``` + +For password-protected SSH private keys, the password must be provided +via an additional `password` field in the secret. Flux CLI also supports +this via the `--password` flag. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Git repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. 
a change to +the spec), this is handled instantly outside the interval window. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for Git operations +like cloning. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the Git reference to resolve and +watch for changes. References are specified in one or more subfields +(`.branch`, `.tag`, `.semver`, `.name`, `.commit`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to a `master` +branch reference. + +#### Branch example + +To Git checkout a specified branch, use `.spec.ref.branch`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: +``` + +This will perform a shallow clone to only fetch the specified branch. + +#### Tag example + +To Git checkout a specified tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + tag: +``` + +This field takes precedence over [`.branch`](#branch-example). + +#### SemVer example + +To Git checkout a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.branch`](#branch-example) and +[`.tag`](#tag-example). 
+ + +#### Name example + +To Git checkout a specfied [reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References), +use `.spec.ref.name`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + # Ref name format reference: https://git-scm.com/docs/git-check-ref-format#_description + name: +``` + +Valid examples are: `refs/heads/main`, `refs/tags/v0.1.0`, `refs/pull/420/head`, +`refs/merge-requests/1/head`. + +This field takes precedence over [`.branch`](#branch-example), +[`.tag`](#tag-example), and [`.semver`](#semver-example). + +#### Commit example + +To Git checkout a specified commit, use `.spec.ref.commit`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + commit: "" +``` + +This field takes precedence over all other fields. It can be combined with +`.spec.ref.branch` to perform a shallow clone of the branch, in which the +commit must exist: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ref: + branch: + commit: "" +``` + +### Verification + +`.spec.verify` is an optional field to enable the verification of Git commit +signatures. The field offers two subfields: + +- `.mode`, to specify what Git commit object should be verified. Only supports + `head` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the GitRepository. Containing the (PGP) public keys of trusted Git authors. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 1m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + verify: + mode: head + secretRef: + name: pgp-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "True"` +- `reason: Succeeded` + +#### Verification Secret example + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: pgp-public-keys + namespace: default +type: Opaque +data: + author1.asc: + author2.asc: +``` + +Exporting armored public keys (`.asc` files) using `gpg`, and generating a +Secret: + +```sh +# Export armored public keys +gpg --export --armor 3CB12BA185C47B67 > author1.asc +gpg --export --armor 6A7436E8790F8689 > author2.asc +# Generate secret +kubectl create secret generic pgp-public-keys \ + --from-file=author1.asc \ + --from-file=author2.asc \ + -o yaml +``` + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +GitRepository. When set to `true`, the controller will stop reconciling the +GitRepository, and changes to the resource or in the Git repository will not +result in a new Artifact. When the field is set to `false` or removed, it will +resume. 
+ +### Git implementation + +`.spec.gitImplementation` is deprecated and its value ignored, the git +implementation used across Flux is go-git. + +#### Optimized Git clones + +Optimized Git clones decreases resource utilization for GitRepository +reconciliations. + +When enabled, it avoids full Git clone operations by first checking whether +the revision of the last stored artifact is still the head of the remote +repository and none of the other factors that contribute to a change in the +artifact, like ignore rules and included repositories, have changed. If that is +so, the reconciliation is skipped. Else, a full reconciliation is performed as +usual. + +This feature is enabled by default. It can be disabled by starting the +controller with the argument `--feature-gates=OptimizedGitClones=false`. + +NB: GitRepository objects configured for SemVer or Commit clones are +not affected by this functionality. + +#### Proxy support + +When a proxy is configured in the source-controller Pod through the appropriate +environment variables, for example `HTTPS_PROXY`, `NO_PROXY`, etc. + +### Recurse submodules + +`.spec.recurseSubmodules` is an optional field to enable the initialization of +all submodules within the cloned Git repository, using their default settings. +This option defaults to `false`. + +Note that for most Git providers (e.g. GitHub and GitLab), deploy keys can not +be used as reusing a key across multiple repositories is not allowed. You have +to use either [HTTPS token-based authentication](#basic-access-authentication), +or an SSH key belonging to a (bot) user who has access to the main repository +and all submodules. + +### Include + +`.spec.include` is an optional field to map the contents of GitRepository +Artifacts into another. This may look identical to Git submodules but has +multiple benefits over regular submodules: + +- Including a `GitRepository` allows you to use different authentication + methods for different repositories. 
+- A change in the included repository will trigger an update of the including + repository. +- Multiple `GitRepository` objects could include the same repository, which + decreases the amount of cloning done compared to using submodules. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: include-example +spec: + include: + - repository: + name: other-repository + fromPath: deploy/kubernetes + toPath: base/app +``` + +The `.fromPath` and `.toPath` fields allow you to limit the files included, and +where they will be copied to. If you do not specify a value for `.fromPath`, +all files from the referenced GitRepository Artifact will be included. The +`.toPath` defaults to the `.repository.name` (e.g. `./other-repository/*`). + +## Working with GitRepositories + +### Excluding files + +By default, files which match the [default exclusion rules](#default-exclusions) +are excluded while archiving the Git repository contents as an Artifact. It is +possible to overwrite and/or overrule the default exclusions using a file in +the Git repository and/or an in-spec set of rules. + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the Git +repository. The `.sourceignore` file follows [the `.gitignore` pattern +format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +The controller recursively loads ignore files so a `.sourceignore` can be +placed in the repository root or in subdirectories. + +#### Ignore spec + +Another option is to define the exclusions within the GitRepository spec, using +the [`.spec.ignore` field](#ignore). Specified rules override the [default +exclusion list](#default-exclusions), and may overrule `.sourceignore` file +exclusions. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a GitRepository outside the +[specified interval window](#interval), a GitRepository can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the GitRepository for reconciliation if the `` differs +from the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite gitrepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source git +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the GitRepository to reach +a [ready state](#ready-gitrepository) using `kubectl`: + +```sh +kubectl wait gitrepository/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a GitRepository, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend a GitRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source git +``` + +**Note:** When a GitRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. 
the source-controller +Pod being evicted from a Node, this will not be reflected in the +GitRepository's Status until it is resumed. + +#### Resume a GitRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch gitrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source git +``` + +### Debugging a GitRepository + +There are several ways to gather information about a GitRepository for +debugging purposes. + +#### Describe the GitRepository + +Describing a GitRepository using +`kubectl describe gitrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2022-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: False + Type: Ready + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" + Observed Generation: 2 + Reason: GitOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/gitrepository-sample/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning GitOperationFailed 2s (x9 over 4s) source-controller failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +#### Trace emitted Events + +To view events for specific GitRepository(s), `kubectl get events` can be used +in combination with `--field-sector` to list the Events for specific objects. 
+For example, running + +```sh +kubectl get events --field-selector involvedObject.kind=GitRepository,involvedObject.name= +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact gitrepository/ stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3' +36s Normal ArtifactUpToDate gitrepository/ artifact up-to-date with remote revision: 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc' +94s Warning GitOperationFailed gitrepository/ failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific GitRepository, e.g. +`flux logs --level=error --kind=GitRepository --name=`. + +## GitRepository Status + +### Artifact + +The GitRepository reports the latest synchronized state from the Git repository +as an Artifact object in the `.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and +can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
+ +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: +status: + artifact: + digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b + lastUpdateTime: "2022-01-29T06:59:23Z" + path: gitrepository///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a + size: 91318 + url: http://source-controller..svc.cluster.local./gitrepository///363a6a8fe6a7f13e05d34c163b0ef02a777da20a.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A GitRepository enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-gitrepository) while fetching the Git +state, it can be [ready](#ready-gitrepository), or it can [fail during +reconciliation](#failed-gitrepository). + +The GitRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the GitRepository to +become `Ready`. + +#### Reconciling GitRepository + +The source-controller marks a GitRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the GitRepository, or the reported Artifact + is determined to have disappeared from the storage. 
+- The generation of the GitRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact revision differs from the current Artifact. + +When the GitRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the GitRepository while their status value is `"True"`. + +#### Ready GitRepository + +The source-controller marks a GitRepository as _ready_ when it has the +following characteristics: + +- The GitRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote Git repository using + the current spec. +- The revision of the reported Artifact is up-to-date with the latest + resolved revision of the remote Git repository. + +When the GitRepository is "ready", the controller sets a Condition with the +following attributes in the GitRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +GitRepository is marked as [reconciling](#reconciling-gitrepository), or e.g. a +[transient error](#failed-gitrepository) occurs due to a temporary network issue. 
+ +When the GitRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +GitRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed GitRepository + +The source-controller may get stuck trying to produce an Artifact for a +GitRepository without completing. This can occur due to some of the following +factors: + +- The remote Git repository [URL](#url) is temporarily unavailable. +- The Git repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- A specified Include is unavailable. +- The verification of the Git commit signature failed. +- The credentials in the referenced Secret are invalid. +- The GitRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the GitRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: GitOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the GitRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the +[verification of a Git commit signature](#verification) fails. 
A condition with +the following attributes is added to the GitRepository's `.status.conditions`: + +- `type: SourceVerifiedCondition` +- `status: "False"` +- `reason: Failed` + +While the GitRepository has one or more of these Conditions, the controller +will continue to attempt to produce an Artifact for the resource with an +exponential backoff, until it succeeds and the GitRepository is marked as +[ready](#ready-gitrepository). + +Note that a GitRepository can be [reconciling](#reconciling-gitrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the GitRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +### Content Configuration Checksum + +The source-controller calculates the SHA256 checksum of the various +configurations of the GitRepository that indicate a change in source and +records it in `.status.contentConfigChecksum`. This field is used to determine +if the source artifact needs to be rebuilt. + +**Deprecation Note:** `contentConfigChecksum` is no longer used and will be +removed in the next API version. The individual components used for generating +content configuration checksum now have explicit fields in the status. This +makes the observations used by the controller for making artifact rebuild +decisions more transparent and easier to debug. + +### Observed Ignore + +The source-controller reports an observed ignore in the GitRepository's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-gitrepository), or stalled due to error +it can not recover from without human intervention. +The value is the same as the [ignore in spec](#ignore). +It indicates the ignore rules used in building the current artifact in storage. 
+It is also used by the controller to determine if an artifact needs to be +rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + cue + pkg + ... +``` + +### Observed Recurse Submodules + +The source-controller reports an observed recurse submodule in the +GitRepository's `.status.observedRecurseSubmodules`. The observed recurse +submodules is the latest `.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[recurse submodules in spec](#recurse-submodules). It indicates the recurse +submodules configuration used in building the current artifact in storage. It is +also used by the controller to determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedRecurseSubmodules: true + ... +``` + +### Observed Include + +The source-controller reports observed include in the GitRepository's +`.status.observedInclude`. The observed include is the latest +`.spec.recurseSubmodules` value which resulted in a +[ready state](#ready-gitrepository), or stalled due to error it can not recover +from without human intervention. The value is the same as the +[include in spec](#include). It indicates the include configuration used in +building the current artifact in storage. It is also used by the controller to +determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedInclude: + - fromPath: deploy/webapp + repository: + name: repo1 + toPath: foo + - fromPath: deploy/secure + repository: + name: repo2 + toPath: bar + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the GitRepository's `.status.observedGeneration`. 
The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-gitrepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1beta2/helmcharts.md b/docs/spec/v1beta2/helmcharts.md new file mode 100644 index 000000000..3932a9694 --- /dev/null +++ b/docs/spec/v1beta2/helmcharts.md @@ -0,0 +1,865 @@ +# Helm Charts + + + +The `HelmChart` API defines a Source to produce an Artifact for a Helm chart +archive with a set of specific configurations. + +## Example + +The following is an example of a HelmChart. It fetches and/or packages a Helm +chart and exposes it as a tarball (`.tgz`) Artifact for the specified +configuration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: '5.*' +``` + +In the above example: + +- A HelmChart named `podinfo` is created, indicated by the `.metadata.name` + field. +- The source-controller fetches the Helm chart every five minutes from the + `podinfo` HelmRepository source reference, indicated by the + `.spec.sourceRef.kind` and `.spec.sourceRef.name` fields. 
+- The fetched Helm chart version is the latest available chart + version in the range specified in `spec.version`. This version is also used as + Artifact revision, reported in-cluster in the `.status.artifact.revision` + field. +- When the current Helm Chart version differs from the latest available chart + in the version range, it is fetched and/or packaged as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmchart.yaml`. + +**Note:** HelmChart is usually used by the helm-controller. Based on the +HelmRelease configuration, an associated HelmChart is created by the +helm-controller. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmchart.yaml + ``` + +2. Run `kubectl get helmchart` to see the HelmChart: + + ```console + NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS + podinfo podinfo 5.* HelmRepository podinfo 53s True pulled 'podinfo' chart with version '5.2.1' + ``` + +3. 
Run `kubectl describe helmchart podinfo` to see the [Artifact](#artifact) and + [Conditions](#conditions) in the HelmChart's Status: + + ```console + Status: + Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Artifact: + Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 + Last Update Time: 2022-02-13T11:24:10Z + Path: helmchart/default/podinfo/podinfo-5.2.1.tgz + Revision: 5.2.1 + Size: 14166 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/podinfo-5.2.1.tgz + Conditions: + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: Ready + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: ArtifactInStorage + Observed Chart Name: podinfo + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ChartPullSucceeded 2m51s source-controller pulled 'podinfo' chart with version '5.2.1' + ``` + +## Writing a HelmChart spec + +As with all other Kubernetes config, a HelmChart needs `apiVersion`, `kind`, and +`metadata` fields. The name of a HelmChart object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmChart also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Source reference + +`.spec.sourceRef` is a required field that specifies a reference to the Source +the chart is available at. 
+ +Supported references are: +- [`HelmRepository`](helmrepositories.md) +- [`GitRepository`](gitrepositories.md) +- [`Bucket`](buckets.md) + +Although there are three kinds of source references, there are only two +underlying implementations. The artifact building process for `GitRepository` +and `Bucket` are the same as they are already built source artifacts. In case +of `HelmRepository`, a chart is fetched and/or packaged based on the +configuration of the Helm chart. + +For a `HelmChart` to be reconciled, the associated artifact in the source +reference must be ready. If the source artifact is not ready, the `HelmChart` +reconciliation is retried. + +When the `metadata.generation` of the `HelmChart` don't match with the +`status.observedGeneration`, the chart is fetched from source and/or packaged. +If there's no `.spec.valuesFiles` specified, the chart is only fetched from the +source, and not packaged. If `.spec.valuesFiles` are specified, the chart is +fetched and packaged with the values files. When the `metadata.generation` +matches the `status.observedGeneration`, the chart is only fetched from source +or from the cache if available, and not packaged. + +When using a `HelmRepository` source reference, the secret reference defined in +the Helm repository is used to fetch the chart. + +The HelmChart reconciliation behavior varies depending on the source reference +kind, see [reconcile strategy](#reconcile-strategy). + +The attributes of the generated artifact also varies depending on the source +reference kind, see [artifact](#artifact). + +### Chart + +`.spec.chart` is a required field that specifies the name or path the Helm chart +is available at in the [Source reference](#source-reference). + +For `HelmRepository` Source reference, it'll be just the name of the chart. 
+ +```yaml +spec: + chart: podinfo + sourceRef: + name: podinfo + kind: HelmRepository +``` + +For `GitRepository` and `Bucket` Source reference, it'll be the path to the +Helm chart directory. + +```yaml +spec: + chart: ./charts/podinfo + sourceRef: + name: podinfo + kind: +``` + +### Version + +`.spec.version` is an optional field to specify the version of the chart in +semver. It is applicable only when the Source reference is a `HelmRepository`. +It is ignored for `GitRepository` and `Bucket` Source reference. It defaults to +the latest version of the chart with value `*`. + +Version can be a fixed semver, minor or patch semver range of a specific +version (i.e. `4.0.x`) or any semver range (i.e. `>=4.0.0 <5.0.0`). + +### Values files + +`.spec.valuesFiles` is an optional field to specify an alternative list of +values files to use as the chart values (values.yaml). The file paths are +expected to be relative to the Source reference. Values files are merged in the +order of the list with the last file overriding the first. It is ignored when +omitted. When values files are specified, the chart is fetched and packaged +with the provided values. + +```yaml +spec: + chart: + spec: + chart: podinfo + ... + valuesFiles: + - values.yaml + - values-production.yaml +``` + +Values files also affect the generated artifact revision, see +[artifact](#artifact). + +### Ignore missing values files + +`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing +values files should be ignored rather than be considered errors. It defaults to +`false`. + +When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified, +the `.status.observedValuesFiles` field is populated with the list of values +files that were found and actually contributed to the packaged chart. + +### Reconcile strategy + +`.spec.reconcileStrategy` is an optional field to specify what enables the +creation of a new Artifact. Valid values are `ChartVersion` and `Revision`. 
+`ChartVersion` is used for creating a new artifact when the chart version +changes in a `HelmRepository`. `Revision` is used for creating a new artifact +when the source revision changes in a `GitRepository` or a `Bucket` Source. It +defaults to `ChartVersion`. + +**Note:** If the reconcile strategy is `ChartVersion` and the source reference +is a `GitRepository` or a `Bucket`, no new chart artifact is produced on updates +to the source unless the `version` in `Chart.yaml` is incremented. To produce +new chart artifact on change in source revision, set the reconcile strategy to +`Revision`. + +Reconcile strategy also affects the artifact version, see [artifact](#artifact) +for more details. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Helm Chart source must be checked for updates. + +After successfully reconciling a HelmChart object, the source-controller +requeues the object for inspection after the specified interval. The value must +be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the source for updates every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmChart objects are set +up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmChart. When set to `true`, the controller will stop reconciling the +HelmChart, and changes to the resource or the Helm chart Source will not result +in a new Artifact. When the field is set to `false` or removed, it will resume. 
+ +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +### Verification + +**Note:** This feature is available only for Helm charts fetched from an OCI Registry. + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes +secret with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify a HelmChart's signature. +This allows for older HelmCharts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available HelmCharts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying HelmCharts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + interval: 5m + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: ">=6.1.6" + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo +spec: + interval: 1m0s + url: oci://ghcr.io/stefanprodan/charts + type: "oci" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmChart
+metadata:
+  name: podinfo
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the HelmChart's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+## Working with HelmCharts
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmChart outside the
+[specified interval window](#interval), a HelmChart can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary-value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). 
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite helmchart/<chart-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the HelmChart to reach a
+[ready state](#ready-helmchart) using `kubectl`:
+
+```sh
+kubectl wait helmchart/<chart-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a HelmChart, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+
+#### Suspend a HelmChart
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmChart
+metadata:
+  name: <chart-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmchart <chart-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}'
+```
+
+**Note:** When a HelmChart has an Artifact and is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+HelmChart's Status until it is resumed.
+
+#### Resume a HelmChart
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmChart
+metadata:
+  name: <chart-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmchart <chart-name> --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}'
+```
+
+### Debugging a HelmChart
+
+There are several ways to gather information about a HelmChart for debugging
+purposes. 
+ +#### Describe the HelmChart + +Describing a HelmChart using `kubectl describe helmchart ` displays +the latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: Stalled + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: False + Type: Ready + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: FetchFailed + Last Handled Reconcile At: 1644759954 + Observed Chart Name: podinfo + Observed Generation: 3 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning InvalidChartReference 11s source-controller invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with ver +sion matching '9.*' found +``` + +#### Trace emitted Events + +To view events for specific HelmChart(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running + +```sh +kubectl events --for HelmChart/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +22s Warning InvalidChartReference helmchart/ invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found +2s Normal ChartPullSucceeded helmchart/ pulled 'podinfo' chart with version '6.0.3' +2s Normal ArtifactUpToDate helmchart/ artifact up-to-date with remote revision: '6.0.3' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific HelmChart, e.g. `flux logs --level=error --kind=HelmChart --name=`. + +### Improving resource consumption by enabling the cache + +When using a `HelmRepository` as Source for a `HelmChart`, the controller loads +the repository index in memory to find the latest version of the chart. + +The controller can be configured to cache Helm repository indexes in memory. +The cache is used to avoid loading repository indexes for every `HelmChart` +reconciliation. + +The following flags are provided to enable and configure the cache: +- `helm-cache-max-size`: The maximum size of the cache in number of indexes. + If `0`, then the cache is disabled. +- `helm-cache-ttl`: The TTL of an index in the cache. +- `helm-cache-purge-interval`: The interval at which the cache is purged of + expired items. + +The caching strategy is to pull a repository index from the cache if it is +available, otherwise to load the index, retrieve and build the chart, +then cache the index. The cached index TTL is refreshed every time the +Helm repository index is loaded with the `helm-cache-ttl` value. + +The cache is purged of expired items every `helm-cache-purge-interval`. + +When the cache is full, no more items can be added to the cache, and the +source-controller will report a warning event instead. 
+ +In order to use the cache, set the related flags in the source-controller +Deployment config: + +```yaml + spec: + containers: + - args: + - --watch-all-namespaces + - --log-level=info + - --log-encoding=json + - --enable-leader-election + - --storage-path=/data + - --storage-adv-addr=source-controller.$(RUNTIME_NAMESPACE).svc.cluster.local. + ## Helm cache with up to 10 items, i.e. 10 indexes. + - --helm-cache-max-size=10 + ## TTL of an index is 1 hour. + - --helm-cache-ttl=1h + ## Purge expired index every 10 minutes. + - --helm-cache-purge-interval=10m +``` + +## HelmChart Status + +### Artifact + +The HelmChart reports the last built chart as an Artifact object in the +`.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`-.tgz`), +and can be retrieved in-cluster from the `.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e + lastUpdateTime: "2022-02-10T18:53:47Z" + path: helmchart///-.tgz + revision: 6.0.3 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-.tgz +``` + +When using a `HelmRepository` as the source reference and values files are +provided, the value of `status.artifact.revision` is the chart version combined +with the `HelmChart` object generation. For example, if the chart version is +`6.0.3` and the `HelmChart` object generation is `1`, the +`status.artifact.revision` value will be `6.0.3+1`. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+1.tgz + revision: 6.0.3+1 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+1.tgz + observedGeneration: 1 + ... +``` + +When using a `GitRepository` or a `Bucket` as the source reference and +`Revision` as the reconcile strategy, the value of `status.artifact.revision` is +the chart version combined with the first 12 characters of the revision of the +`GitRepository` or `Bucket`. For example if the chart version is `6.0.3` and the +revision of the `Bucket` is `4e5cbb7b97d00a8039b8810b90b922f4256fd3bd8f78b934b4892dae13f7ca87`, +the `status.artifact.revision` value will be `6.0.3+4e5cbb7b97d0`. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+4e5cbb7b97d0.tgz + revision: 6.0.3+4e5cbb7b97d0 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+4e5cbb7b97d0.tgz +``` + +### Conditions + +A HelmChart enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmchart) while fetching or building the +chart, it can be [ready](#ready-helmchart), it can +[fail during reconciliation](#failed-helmchart), or it can +[stall](#stalled-helmchart). + +The HelmChart API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmChart to become +`Ready`. 
+ +#### Reconciling HelmChart + +The source-controller marks a HelmChart as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the HelmChart, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the HelmChart is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmChart is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmChart's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new version, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewChart` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmChart while their status value is `"True"`. + +#### Ready HelmChart + +The source-controller marks a HelmChart as _ready_ when it has the following +characteristics: + +- The HelmChart reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch and build the Helm chart using the current + spec. +- The version/revision of the reported Artifact is up-to-date with the + latest version/revision of the Helm chart. + +When the HelmChart is "ready", the controller sets a Condition with the +following attributes in the HelmChart's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmChart is marked as [reconciling](#reconciling-helmchart), or e.g. +a [transient error](#failed-helmchart) occurs due to a temporary network issue. 
+ +When the HelmChart Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmChart's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmChart + +The source-controller may get stuck trying to produce an Artifact for a +HelmChart without completing. This can occur due to some of the following +factors: + +- The Helm chart Source is temporarily unavailable. +- The credentials in the [Source reference](#source-reference) Secret are + invalid. +- The HelmChart spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmChart's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: StorageOperationFailed` | `reason: URLInvalid` | `reason: IllegalPath` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmChart while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmChart has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmChart is marked as [ready](#ready-helmchart). + +Note that a HelmChart can be [reconciling](#reconciling-helmchart) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmChart spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. 
When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmChart + +The source-controller can mark a HelmChart as _stalled_ when it determines that +without changes to the spec, the reconciliation can not succeed. +For example because a HelmChart Version is set to a non-existing version. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmchart), but adds another Condition with the following +attributes to the HelmChart's `.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: InvalidChartReference` + +While the HelmChart has this Condition, the controller will not requeue the +resource any further, and will stop reconciling the resource until a change to +the spec is made. + +### Observed Source Artifact Revision + +The source-controller reports the revision of the last +[Source reference's](#source-reference) Artifact the current chart was fetched +from in the HelmChart's `.status.observedSourceArtifactRevision`. It is used to +keep track of the source artifact revision and detect when a new source +artifact is available. + +### Observed Chart Name + +The source-controller reports the last resolved chart name of the Artifact +for the [`.spec.chart` field](#chart) in the HelmChart's +`.status.observedChartName`. It is used to keep track of the chart and detect +when a new chart is found. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmChart's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-helmchart), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. 
+ +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1beta2/helmrepositories.md b/docs/spec/v1beta2/helmrepositories.md new file mode 100644 index 000000000..0fd33ed00 --- /dev/null +++ b/docs/spec/v1beta2/helmrepositories.md @@ -0,0 +1,914 @@ +# Helm Repositories + + + +There are 2 [Helm repository types](#type) defined by the `HelmRepository` API: +- Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm +repository index YAML (`index.yaml`). +- OCI Helm repository, which defines a source that does not produce an Artifact. + It's a data container to store the information about the OCI repository that + can be used by [HelmChart](helmcharts.md) to access OCI Helm charts. + +## Examples + +### Helm HTTP/S repository + +The following is an example of a HelmRepository. It creates a YAML (`.yaml`) +Artifact from the fetched Helm repository index (in this example the [podinfo +repository](https://github.com/stefanprodan/podinfo)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://stefanprodan.github.io/podinfo +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller fetches the Helm repository index YAML every five + minutes from `https://stefanprodan.github.io/podinfo`, indicated by the + `.spec.interval` and `.spec.url` fields. 
+- The digest (algorithm defaults to SHA256) of the Helm repository index after + stable sorting the entries is used as Artifact revision, reported in-cluster + in the `.status.artifact.revision` field. +- When the current HelmRepository revision differs from the latest fetched + revision, it is stored as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + ``` + +3. Run `kubectl describe helmrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the HelmRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Last Update Time: 2022-02-04T09:55:58Z + Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Size: 40898 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Conditions: + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 1m source-controller fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' + ``` + +### Helm OCI repository + +The following is an example of an OCI HelmRepository. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + type: "oci" + interval: 5m0s + url: oci://ghcr.io/stefanprodan/charts +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- A HelmChart that refers to this HelmRepository uses the URL in the `.spec.url` + field to access the OCI Helm chart. 
+ +**NOTE:** The `.spec.interval` field is only used by the `default` Helm +repository and is ignored for any value in `oci` Helm repository. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/charts 3m22s + ``` + +Because the OCI Helm repository is a data container, there's nothing to report +for `READY` and `STATUS` columns above. The existence of the object can be +considered to be ready for use. + +## Writing a HelmRepository spec + +As with all other Kubernetes config, a HelmRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a HelmRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Type + +`.spec.type` is an optional field that specifies the Helm repository type. + +Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used +for authentication purposes. + +Supported options are: +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when static credentials +are used for authentication. If you do not specify `.spec.provider`, it defaults +to `generic`. + +**Note**: The provider field is supported only for Helm OCI repositories. The `spec.type` +field must be set to `oci`. 
+ +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS worker +node IAM role or IAM Role for Service Accounts (IRSA), and by extension gain access +to ECR. + +##### EKS Worker Node IAM Role + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. + +##### IAM Role for Service Accounts (IRSA) + +When using IRSA to enable access to ECR, add the following patch to your bootstrap +repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity, Kubelet Managed +Identity or Azure Active Directory pod-managed identity (aad-pod-identity), and +by extension gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running on +it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Azure Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +##### Deprecated: AAD Pod Identity + +**Warning:** The AAD Pod Identity project will be archived in +[September 2023](https://github.com/Azure/aad-pod-identity#-announcement), +and you are advised to use Workload Identity instead. 
+ +When using aad-pod-identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + - op: add + path: /spec/template/metadata/labels/aadpodidbinding + value: + target: + kind: Deployment + name: source-controller +``` + +When using pod-managed identity on an AKS cluster, AAD Pod Identity has to be used +to give the `source-controller` pod access to the ACR. To do this, you have to install +`aad-pod-identity` on your cluster, create a managed identity that has access to the +container registry (this can also be the Kubelet identity if it has `AcrPull` role +assignment on the ACR), create an `AzureIdentity` and `AzureIdentityBinding` that describe +the managed identity and then label the `source-controller` deployment with the name of the +AzureIdentity as shown in the patch above. Please take a look at [this guide](https://azure.github.io/aad-pod-identity/docs/) +or [this one](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) +if you want to use AKS pod-managed identities add-on that is in preview. + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes or +Workload Identity, and by extension gain access to GCR or Artifact Registry. + +##### Access Scopes + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and Artifact Registry, +source-controller running on it will also have access to them. 
+
+##### GKE Workload Identity
+
+When using Workload Identity to enable access to GCR or Artifact Registry, add the
+following patch to your bootstrap repository, in the `flux-system/kustomization.yaml`
+file:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - gotk-components.yaml
+  - gotk-sync.yaml
+patches:
+  - patch: |
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: source-controller
+        annotations:
+          iam.gke.io/gcp-service-account: <identity-name>
+    target:
+      kind: ServiceAccount
+      name: source-controller
+```
+
+The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts`
+that is located under the Artifact Registry Reader role. If you are using Google Container Registry service,
+the needed permission is instead `storage.objects.list` which can be bound as part
+of the Container Registry Service Agent role. Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
+for more information about setting up GKE Workload Identity.
+
+### Insecure
+
+`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP)
+container registry server, if set to `true`. The default value is `false`,
+denying insecure non-TLS connections when fetching Helm chart OCI artifacts.
+
+**Note**: The insecure field is supported only for Helm OCI repositories.
+The `spec.type` field must be set to `oci`.
+
+### Interval
+
+**Note:** This field is ineffectual for [OCI Helm
+Repositories](#helm-oci-repository).
+
+`.spec.interval` is an optional field that specifies the interval at which the
+Helm repository index must be consulted. When not set, the default value is
+`1m`.
+
+After successfully reconciling a HelmRepository object, the source-controller
+requeues the object for inspection after the specified interval. The value
+must be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
+e.g. 
`10m0s` to fetch the HelmRepository index YAML every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmRepository objects +are set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### URL + +`.spec.url` is a required field that depending on the [type of the HelmRepository object](#type) +specifies the HTTP/S or OCI address of a Helm repository. + +For OCI, the URL is expected to point to a registry repository, e.g. `oci://ghcr.io/fluxcd/source-controller`. + +For Helm repositories which require authentication, see [Secret reference](#secret-reference). + +### Timeout + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.timeout` is an optional field to specify a timeout for the fetch +operation. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. When not set, the +default value is `1m`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the HelmRepository, containing authentication +credentials for the repository. + +#### Basic access authentication + +To authenticate towards a Helm repository using basic access authentication +(in other words: using a username and password), the referenced Secret is +expected to contain `.data.username` and `.data.password` values. 
+ +For example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + secretRef: + name: example-user +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-user + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +OCI Helm repository example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/my-user/my-private-repo + type: "oci" + secretRef: + name: oci-creds +--- +apiVersion: v1 +kind: Secret +metadata: + name: oci-creds + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +For OCI Helm repositories, Kubernetes secrets of type [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) are also supported. +It is possible to create one such secret with `kubectl create secret docker-registry` +or using the Flux CLI: + +```yaml +flux create secret oci ghcr-auth \ + --url=ghcr.io \ + --username=flux \ + --password=${GITHUB_PAT} +``` + +**Warning:** Support for specifying TLS authentication data using this API has been +deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead. +If the controller uses the secret specified by this field to configure TLS, then +a deprecation warning will be logged. + +### Cert secret reference + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. 
+* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Pass credentials + +`.spec.passCredentials` is an optional field to allow the credentials from the +[Secret reference](#secret-reference) to be passed on to a host that does not +match the host as defined in URL. This may for example be required if the host +advertised chart URLs in the index differ from the specified URL. + +Enabling this should be done with caution, as it can potentially result in +credentials getting stolen in a man-in-the-middle attack. This feature only applies +to HTTP/S Helm repositories. + +### Suspend + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmRepository. 
When set to `true`, the controller will stop reconciling the
+HelmRepository, and changes to the resource or the Helm repository index will
+not result in a new Artifact. When the field is set to `false` or removed, it
+will resume.
+
+For practical information, see
+[suspending and resuming](#suspending-and-resuming).
+
+## Working with HelmRepositories
+
+**Note:** This section does not apply to [OCI Helm
+Repositories](#helm-oci-repository): being a data container, once created, they
+are ready to be used by [HelmCharts](helmcharts.md).
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmRepository outside the
+[specified interval window](#interval), a HelmRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary-value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary-value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite helmrepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source helm <repository-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the HelmRepository to
+reach a [ready state](#ready-helmrepository) using `kubectl`:
+
+```sh
+kubectl wait helmrepository/<repository-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a HelmRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+
+#### Suspend a HelmRepository
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+  name: <repository-name>
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmrepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}'
+```
+
+Using `flux`:
+
+```sh
+flux suspend source helm <repository-name>
+```
+
+**Note:** When a HelmRepository has an Artifact and is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+HelmRepository's Status until it is resumed.
+
+#### Resume a HelmRepository
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+  name: <repository-name>
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps; as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch helmrepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}'
+```
+
+Using `flux`:
+
+```sh
+flux resume source helm <repository-name>
+```
+
+### Debugging a HelmRepository
+
+**Note:** This section does not apply to [OCI Helm
+Repositories](#helm-oci-repository), being a data container, they are static
+objects that don't require debugging if valid.
+
+There are several ways to gather information about a HelmRepository for debugging
+purposes.
+
+#### Describe the HelmRepository
+
+Describing a HelmRepository using `kubectl describe helmrepository <repository-name>`
+displays the latest recorded information for the resource in the `Status` and
+`Events` sections:
+
+```console
+...
+Status:
+...
+ Conditions: + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: Stalled + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: False + Type: Ready + Last Transition Time: 2022-02-04T13:41:56Z + Message: failed to construct Helm client: scheme "invalid" not supported + Observed Generation: 2 + Reason: Failed + Status: True + Type: FetchFailed + Observed Generation: 2 + URL: http://source-controller.source-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning Failed 6s source-controller failed to construct Helm client: scheme "invalid" not supported +``` + +#### Trace emitted Events + +To view events for specific HelmRepository(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for HelmRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +107s Warning Failed helmrepository/ failed to construct Helm client: scheme "invalid" not supported +7s Normal NewArtifact helmrepository/ fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' +3s Normal ArtifactUpToDate helmrepository/ artifact up-to-date with remote revision: 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific HelmRepository, e.g. `flux logs --level=error --kind=HelmRepository --name=`. 
+ +## HelmRepository Status + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), they do not contain any information in the +status. + +### Artifact + +The HelmRepository reports the last fetched repository index as an Artifact +object in the `.status.artifact` of the resource. + +The Artifact file is an exact copy of the Helm repository index YAML +(`index-.yaml`) as fetched, and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: +status: + artifact: + digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + lastUpdateTime: "2022-02-04T09:55:58Z" + path: helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + size: 40898 + url: http://source-controller.flux-system.svc.cluster.local./helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml +``` + +### Conditions + +A HelmRepository enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmrepository) while fetching the +repository index, it can be [ready](#ready-helmrepository), it can +[fail during reconciliation](#failed-helmrepository), or it can +[stall](#stalled-helmrepository). + +The HelmRepository API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmRepository to become +`Ready`. 
+ +#### Reconciling HelmRepository + +The source-controller marks a HelmRepository as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the HelmRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the HelmRepository is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmRepository while their status value is `"True"`. + +#### Ready HelmRepository + +The source-controller marks a HelmRepository as _ready_ when it has the following +characteristics: + +- The HelmRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch the Helm repository index using the current + spec. +- The revision of the reported Artifact is up-to-date with the latest + revision of the Helm repository. + +When the HelmRepository is "ready", the controller sets a Condition with the following +attributes in the HelmRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmRepository is marked as [reconciling](#reconciling-helmrepository), or e.g. 
+a [transient error](#failed-helmrepository) occurs due to a temporary network +issue. + +When the HelmRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmRepository + +The source-controller may get stuck trying to produce an Artifact for a +HelmRepository without completing. This can occur due to some of the following +factors: + +- The Helm repository [URL](#url) is temporarily unavailable. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The HelmRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: IndexationFailed` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmRepository has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmRepository is marked as [ready](#ready-helmrepository). 
+ +Note that a HelmRepository can be [reconciling](#reconciling-helmrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmRepository + +The source-controller can mark a HelmRepository as _stalled_ when it determines +that without changes to the spec, the reconciliation can not succeed. +For example because a Helm repository URL with an unsupported protocol is +specified. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmrepository), but adds another Condition with the following +attributes to the HelmRepository's +`.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: URLInvalid` + +While the HelmRepository has this Condition, the controller will not requeue +the resource any further, and will stop reconciling the resource until a change +to the spec is made. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-helmrepository), +or stalled due to error it can not recover from without human intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). 
+ +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1beta2/ocirepositories.md b/docs/spec/v1beta2/ocirepositories.md new file mode 100644 index 000000000..eb5de4c5f --- /dev/null +++ b/docs/spec/v1beta2/ocirepositories.md @@ -0,0 +1,1151 @@ +# OCI Repositories + + + +The `OCIRepository` API defines a Source to produce an Artifact for an OCI +repository. + +## Example + +The following is an example of an OCIRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from an OCI repository for the +resolved digest. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: latest +``` + +In the above example: + +- An OCIRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the OCI repository every five minutes, indicated + by the `.spec.interval` field. +- It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` + repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. +- The resolved tag and SHA256 digest is used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current OCIRepository digest differs from the latest fetched + digest, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `ocirepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f ocirepository.yaml + ``` + +2. 
Run `kubectl get ocirepository` to see the OCIRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + ``` + +3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the OCIRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Last Update Time: 2022-06-14T11:23:36Z + Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 + URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Conditions: + Last Transition Time: 2022-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + ``` + +## Writing an OCIRepository spec + +As with all other Kubernetes config, an OCIRepository needs `apiVersion`, 
+`kind`, and `metadata` fields. The name of an OCIRepository object must be a
+valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
+
+An OCIRepository also needs a
+[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
+
+### URL
+
+`.spec.url` is a required field that specifies the address of the
+container image repository in the format `oci://<host>:<port>/<org-name>/<repository-name>`.
+
+**Note:** Specifying a tag or digest is not acceptable for this field.
+
+### Provider
+
+`.spec.provider` is an optional field that allows specifying an OIDC provider used for
+authentication purposes.
+
+Supported options are:
+
+- `generic`
+- `aws`
+- `azure`
+- `gcp`
+
+The `generic` provider can be used for public repositories or when
+static credentials are used for authentication, either with
+`spec.secretRef` or `spec.serviceAccountName`.
+If you do not specify `.spec.provider`, it defaults to `generic`.
+
+#### AWS
+
+The `aws` provider can be used to authenticate automatically using the EKS
+worker node IAM role or IAM Role for Service Accounts (IRSA), and by extension
+gain access to ECR.
+
+When the worker node IAM role has access to ECR, source-controller running on it
+will also have access to ECR.
+
+When using IRSA to enable access to ECR, add the following patch to your
+bootstrap repository, in the `flux-system/kustomization.yaml` file:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - gotk-components.yaml
+  - gotk-sync.yaml
+patches:
+  - patch: |
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: source-controller
+        annotations:
+          eks.amazonaws.com/role-arn: <role-arn>
+    target:
+      kind: ServiceAccount
+      name: source-controller
+```
+
+Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly`
+to the IAM role when using IRSA.
+ +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running +on it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). + +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. 
Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes +or Workload Identity, and by extension gain access to GCR or Artifact Registry. + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and +Artifact Registry, source-controller running on it will also have access to them. + +When using Workload Identity to enable access to GCR or Artifact Registry, add +the following patch to your bootstrap repository, in the +`flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using +Google Container Registry service, the needed permission is instead `storage.objects.list` +which can be bound as part of the Container Registry Service Agent role. +Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the OCIRepository, containing authentication +credentials for the OCI repository. + +This secret is expected to be in the same format as [`imagePullSecrets`][image-pull-secrets]. +The usual way to create such a secret is with: + +```sh +kubectl create secret docker-registry ... 
+``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a name reference to a +Service Account in the same namespace as the OCIRepository. The controller will +fetch the image pull secrets attached to the service account and use them for authentication. + +**Note:** that for a publicly accessible image repository, you don't need to provide a `secretRef` +nor `serviceAccountName`. + +### Cert secret reference + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +**Warning:** Support for the `caFile`, `certFile` and `keyFile` keys have been +deprecated. If you have any Secrets using these keys and specified in an +OCIRepository, the controller will log a deprecation warning. + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +OCI repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). 
+ +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for OCI operations +like pulling. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the OCI reference to resolve and +watch for changes. References are specified in one or more subfields +(`.tag`, `.semver`, `.digest`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to the `latest` +tag. + +#### Tag example + +To pull a specific tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + ref: + tag: "" +``` + +#### SemVer example + +To pull a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.tag`](#tag-example). + +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. + +#### Digest example + +To pull a specific digest, use `.spec.ref.digest`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + ref: + digest: "sha256:" +``` + +This field takes precedence over all other fields. + +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. + +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/deployment.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. 
+ +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: notation + secretRef: + name: notation-config +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +containing Certificate Authority (CA) root certificates and the a `trust policy` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: notation-config +type: Opaque +data: + certificate1.pem: + certificate2.crt: + trustpolicy.json: +``` + +Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must +be named `trustpolicy.json` for Flux to make use of them. 
+ +For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md). + +Flux will loop over the certificates and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right certificate is in the secret. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +OCIRepository. When set to `true`, the controller will stop reconciling the +OCIRepository, and changes to the resource or in the OCI repository will not +result in a new Artifact. When the field is set to `false` or removed, it will +resume. + +## Working with OCIRepositories + +### Excluding files + +By default, files which match the [default exclusion rules](#default-exclusions) +are excluded while archiving the OCI repository contents as an Artifact. +It is possible to overwrite and/or overrule the default exclusions using +the [`.spec.ignore` field](#ignore). + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the artifact. +The `.sourceignore` file follows [the `.gitignore` pattern +format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern +entries may overrule [default exclusions](#default-exclusions). + +The controller recursively loads ignore files so a `.sourceignore` can be +placed in the artifact root or in subdirectories. + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a OCIRepository outside the +[specified interval window](#interval), an OCIRepository can be annotated with +`reconcile.fluxcd.io/requestedAt: `. 
Annotating the resource +queues the OCIRepository for reconciliation if the `` differs +from the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite ocirepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source oci +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the OCIRepository to reach +a [ready state](#ready-ocirepository) using `kubectl`: + +```sh +kubectl wait gitrepository/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of an OCIRepository, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend an OCIRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source oci +``` + +**Note:** When an OCIRepository has an Artifact and it is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +OCIRepository's Status until it is resumed. + +#### Resume an OCIRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. 
`kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source oci +``` + +### Debugging an OCIRepository + +There are several ways to gather information about a OCIRepository for +debugging purposes. + +#### Describe the OCIRepository + +Describing an OCIRepository using +`kubectl describe ocirepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: False + Type: Ready + Last Transition Time: 2022-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning OCIOperationFailed 2s (x9 over 4s) source-controller failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +#### Trace emitted Events + +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. 
For +example, running + +```sh +kubectl events --for OCIRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific OCIRepository, e.g. +`flux logs --level=error --kind=OCIRepository --name=`. + +## OCIRepository Status + +### Artifact + +The OCIRepository reports the latest synchronized state from the OCI repository +as an Artifact object in the `.status.artifact` of the resource. + +The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact. + +The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the +[OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md). +If the OCI artifact was created with `flux push artifact`, then the `metadata` will contain the following +annotations: +- `org.opencontainers.image.created` the date and time on which the artifact was built +- `org.opencontainers.image.source` the URL of the Git repository containing the source files +- `org.opencontainers.image.revision` the Git branch and commit SHA1 of the source files + +The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and +can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
+ +#### Artifact example + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +status: + artifact: + digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 + lastUpdateTime: "2022-08-08T09:35:45Z" + metadata: + org.opencontainers.image.created: "2022-08-08T12:31:41+03:00" + org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872 + org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git + path: ocirepository///.tar.gz + revision: @ + size: 1105 + url: http://source-controller..svc.cluster.local./ocirepository///.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +OCIRepository has various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-ocirepository) while fetching the remote +state, it can be [ready](#ready-ocirepository), or it can [fail during +reconciliation](#failed-ocirepository). + +The OCIRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the OCIRepository to +become `Ready`. 
+ +#### Reconciling OCIRepository + +The source-controller marks an OCIRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the OCIRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the OCIRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact digest differs from the current Artifact. + +When the OCIRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the OCIRepository while their status value is `"True"`. + +#### Ready OCIRepository + +The source-controller marks an OCIRepository as _ready_ when it has the +following characteristics: + +- The OCIRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote OCI repository using + the current spec. +- The digest of the reported Artifact is up-to-date with the latest + resolved digest of the remote OCI repository. + +When the OCIRepository is "ready", the controller sets a Condition with the +following attributes in the OCIRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. 
a +[transient error](#failed-ocirepository) occurs due to a temporary network issue. + +When the OCIRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +OCIRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed OCIRepository + +The source-controller may get stuck trying to produce an Artifact for a +OCIRepository without completing. This can occur due to some of the following +factors: + +- The remote OCI repository [URL](#url) is temporarily unavailable. +- The OCI repository does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The OCIRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the OCIRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: OCIArtifactPullFailed` | `reason: OCIArtifactLayerOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the OCIRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +In addition to the above Condition types, when the signature +[verification](#verification) fails. 
A condition with
the following attributes is added to the OCIRepository's `.status.conditions`:
It is also used by the controller to determine if +an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Layer Selector + +The source-controller reports an observed layer selector in the OCIRepository's +`.status.observedLayerSelector`. The observed layer selector is the latest +`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human intervention. +The value is the same as the [layer selector in spec](#layer-selector). +It indicates the layer selection configuration used in building the current +artifact in storage. It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip + operation: copy + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the OCIRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). 
+ +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +[image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/go.mod b/go.mod index 3819e2ba2..21c15753e 100644 --- a/go.mod +++ b/go.mod @@ -1,36 +1,429 @@ module github.com/fluxcd/source-controller -go 1.15 +go 1.25.0 replace github.com/fluxcd/source-controller/api => ./api +// Replace digest lib to master to gather access to BLAKE3. +// xref: https://github.com/opencontainers/go-digest/pull/66 +replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be + require ( - github.com/Masterminds/semver/v3 v3.1.1 - github.com/blang/semver/v4 v4.0.0 - github.com/cyphar/filepath-securejoin v0.2.2 - github.com/fluxcd/pkg/apis/meta v0.7.0 - github.com/fluxcd/pkg/gittestserver v0.1.0 - github.com/fluxcd/pkg/helmtestserver v0.1.0 - github.com/fluxcd/pkg/lockedfile v0.0.5 - github.com/fluxcd/pkg/runtime v0.8.0 - github.com/fluxcd/pkg/ssh v0.0.5 - github.com/fluxcd/pkg/untar v0.0.5 - github.com/fluxcd/pkg/version v0.0.1 - github.com/fluxcd/source-controller/api v0.7.0 - github.com/go-git/go-billy/v5 v5.0.0 - github.com/go-git/go-git/v5 v5.2.0 - 
github.com/go-logr/logr v0.3.0 - github.com/libgit2/git2go/v31 v31.3.0 - github.com/minio/minio-go/v7 v7.0.5 - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.10.2 - github.com/spf13/pflag v1.0.5 - golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - helm.sh/helm/v3 v3.5.0 - k8s.io/api v0.20.2 - k8s.io/apimachinery v0.20.2 - k8s.io/client-go v0.20.2 - sigs.k8s.io/controller-runtime v0.8.0 - sigs.k8s.io/yaml v1.2.0 + cloud.google.com/go/compute/metadata v0.8.0 + cloud.google.com/go/storage v1.56.1 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/cyphar/filepath-securejoin v0.4.1 + github.com/distribution/distribution/v3 v3.0.0 + github.com/docker/cli v28.4.0+incompatible + github.com/docker/go-units v0.5.0 + github.com/elazarl/goproxy v1.7.2 + github.com/fluxcd/cli-utils v0.36.0-flux.15 + github.com/fluxcd/pkg/apis/event v0.19.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + github.com/fluxcd/pkg/artifact v0.3.0 + github.com/fluxcd/pkg/auth v0.31.0 + github.com/fluxcd/pkg/cache v0.11.0 + github.com/fluxcd/pkg/git v0.36.0 + github.com/fluxcd/pkg/git/gogit v0.40.0 + github.com/fluxcd/pkg/gittestserver v0.20.0 + github.com/fluxcd/pkg/helmtestserver v0.30.0 + github.com/fluxcd/pkg/http/transport v0.7.0 + github.com/fluxcd/pkg/masktoken v0.8.0 + 
github.com/fluxcd/pkg/oci v0.56.0 + github.com/fluxcd/pkg/runtime v0.84.0 + github.com/fluxcd/pkg/sourceignore v0.14.0 + github.com/fluxcd/pkg/ssh v0.21.0 + github.com/fluxcd/pkg/tar v0.14.0 + github.com/fluxcd/pkg/testserver v0.13.0 + github.com/fluxcd/pkg/version v0.10.0 + github.com/fluxcd/source-controller/api v1.7.0 + github.com/foxcpp/go-mockdns v1.1.0 + github.com/go-git/go-billy/v5 v5.6.2 + github.com/go-git/go-git/v5 v5.16.2 + github.com/go-logr/logr v1.4.3 + github.com/google/go-containerregistry v0.20.6 + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 + github.com/google/uuid v1.6.0 + github.com/minio/minio-go/v7 v7.0.94 + github.com/notaryproject/notation-core-go v1.3.0 + github.com/notaryproject/notation-go v1.3.2 + github.com/onsi/gomega v1.38.2 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/ory/dockertest/v3 v3.12.0 + github.com/otiai10/copy v1.14.1 + github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 + github.com/prometheus/client_golang v1.23.0 + github.com/sigstore/cosign/v2 v2.5.2 + github.com/sigstore/sigstore v1.9.5 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/pflag v1.0.10 + golang.org/x/crypto v0.41.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.16.0 + google.golang.org/api v0.248.0 + helm.sh/helm/v3 v3.19.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/client-go v0.34.0 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + oras.land/oras-go/v2 v2.6.0 + sigs.k8s.io/controller-runtime v0.22.1 + 
sigs.k8s.io/yaml v1.6.0 ) + +require ( + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.16.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.30 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.2 // indirect + github.com/Azure/go-autorest/tracing v0.6.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect + github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect + github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect + github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect + github.com/alibabacloud-go/debug v1.0.0 // indirect + github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect + github.com/alibabacloud-go/openapi-util v0.1.0 // indirect + github.com/alibabacloud-go/tea v1.2.1 // indirect + github.com/alibabacloud-go/tea-utils v1.4.5 // indirect + github.com/alibabacloud-go/tea-xml v1.1.3 // indirect + github.com/aliyun/credentials-go v1.3.2 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 // indirect + github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect + github.com/buildkite/agent/v3 v3.98.2 // indirect + github.com/buildkite/go-pipeline v0.13.3 // indirect + github.com/buildkite/interpolate v0.1.5 // indirect + github.com/buildkite/roko v1.3.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + 
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect + github.com/clbanning/mxj/v2 v2.7.0 // indirect + github.com/cloudflare/circl v1.6.1 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/continuity v0.4.5 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/coreos/go-oidc/v3 v3.15.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + 
github.com/docker/go-metrics v0.0.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fluxcd/gitkit v0.6.0 // indirect + github.com/fluxcd/pkg/apis/acl v0.9.0 // indirect + github.com/fluxcd/pkg/lockedfile v0.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect + github.com/go-chi/chi v4.1.2+incompatible // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.10 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + 
github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-piv/piv-go/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f // indirect + github.com/google/go-github/v72 v72.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 
v2.15.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/in-toto/attestation v1.1.1 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + 
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + 
github.com/notaryproject/notation-plugin-framework-go v1.0.0 // indirect + github.com/notaryproject/tspclient-go v1.0.0 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/oleiade/reflections v1.1.0 // indirect + github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a // indirect + github.com/opencontainers/runc v1.2.4 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/otiai10/mint v1.6.3 // indirect + github.com/pborman/uuid v1.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pjbgf/sha1cd v0.4.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rs/xid v1.6.0 // indirect + 
github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/segmentio/ksuid v1.0.4 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect + github.com/sigstore/protobuf-specs v0.4.3 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore-go v1.0.0 // indirect + github.com/sigstore/timestamp-authority v1.2.8 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/viper v1.20.1 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.1.1 // indirect + github.com/tinylib/msgp 
v1.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/tjfoc/gmsm v1.4.1 // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/veraison/go-cose v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.130.1 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc 
v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.8.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.34.0 // indirect + k8s.io/apiserver 
v0.34.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kubectl v0.34.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/release-utils v0.11.1 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect +) + +retract v0.32.0 // Refers to incorrect ./api version. diff --git a/go.sum b/go.sum index 818c3034c..369cd9509 100644 --- a/go.sum +++ b/go.sum @@ -1,1323 +1,1419 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= 
-cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod 
h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= +cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.56.1 h1:n6gy+yLnHn0hTwBFzNn8zJ1kqWfR91wzdM8hjRF4wP0= +cloud.google.com/go/storage v1.56.1/go.mod h1:C9xuCZgFl3buo2HZU/1FncgvvOgTAs/rnh4gF4lMg0s= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 h1:mRwydyTyhtRX2wXS3mqYWzR2qlv6KsmoKXmlz5vInjg= +cuelabs.dev/go/oci/ociregistry 
v0.0.0-20241125120445-2c00c104c6e1/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg= +cuelang.org/go v0.12.1 h1:5I+zxmXim9MmiN2tqRapIqowQxABv2NKTgbOspud1Eo= +cuelang.org/go v0.12.1/go.mod h1:B4+kjvGGQnbkz+GuAv1dq/R308gTkp0sO28FdMrJ2Kw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 h1:ci6Yd6nysBRLEodoziB6ah1+YOzZbZk+NYneoA6q+6E= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 
h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 h1:ldKsKtEIblsgsr6mPwrd9yRntoX6uLz/K89wsldwx/k= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3/go.mod h1:MAm7bk0oDLmD8yIkvfbxPW04fxzphPyL+7GzwHxOp6Y= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 h1:figxyQZXzZQIcP3njhC68bYUiTw45J8/SsHaLW8Ax0M= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0/go.mod h1:TmlMW4W5OvXOmOyKNnor8nlMMiO1ctIyzmHme/VHsrA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI= 
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= 
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= +github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= 
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod 
h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.1.0 h1:j7GpgZ7PdFqNsmncycTHsLmVPf5/3wJtlgW9TNDYD9Y= -github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= -github.com/Masterminds/sprig/v3 v3.2.0 h1:P1ekkbuU73Ui/wS0nK1HOM37hh4xdfZo485UPf8rc+Y= -github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= -github.com/Masterminds/squirrel v1.4.0 h1:he5i/EXixZxrBUWcxzDYMiju9WZ3ld/l7QBNuo/eN3w= -github.com/Masterminds/squirrel v1.4.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= -github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= -github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 
h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= 
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= 
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 
h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= +github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= +github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= +github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= +github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU= +github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= +github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= +github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= +github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= +github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/openapi-util v0.0.9/go.mod 
h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= +github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= +github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= +github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= +github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-xml v1.1.2/go.mod 
h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= +github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= +github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= +github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= 
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= +github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 h1:lcwFjRx3C/hBxJzoWkD6DIG2jeB+mzLmFVBFVOadxxE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1/go.mod h1:qt9OL5kXqWoSub4QAkOF74mS3M2zOTNxMODqgwEUjt8= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 h1:EfatDVSMFxaS5TiR0C0zssQU1Nm+rGx3VbUGIH1y274= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2/go.mod h1:oRy1IEgzXtOkEk4B/J7HZbXUC258drDLtkmc++lN7IA= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 h1:Txq5jxY/ao+2Vx/kX9+65WTqkzCnxSlXnwIj+Cr/fng= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1/go.mod h1:+hYFg3laewH0YCfJRv+o5R3bradDKmFIm/uaiaD1U7U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0 h1:2jKyib9msVrAVn+lngwlSplG13RpUZmzVte2yDao5nc= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0/go.mod h1:RyhzxkWGcfixlkieewzpO3D4P4fTMxhIDqDZWsh0u/4= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 h1:50sS0RWhGpW/yZx2KcDNEb1u1MANv5BMEkJgcieEDTA= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1/go.mod h1:ErZOtbzuHabipRTDTor0inoRlYwbsV1ovwSxjGs/uJo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod 
h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= -github.com/bugsnag/panicwrap 
v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/buildkite/agent/v3 v3.98.2 h1:VOOxv8XD8HVCtEvtRPQhvB6k2Gorha2gN1wGh94gYAA= +github.com/buildkite/agent/v3 v3.98.2/go.mod h1:+zCvvo/OlOwfs+AH3QvSn37H3cBXP3Fe18eoSbqUvnY= +github.com/buildkite/go-pipeline v0.13.3 h1:llI7sAdZ7sqYE7r8ePlmDADRhJ1K0Kua2+gv74Z9+Es= +github.com/buildkite/go-pipeline v0.13.3/go.mod h1:1uC2XdHkTV1G5jYv9K8omERIwrsYbBruBrPx1Zu1uFw= +github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= +github.com/buildkite/interpolate v0.1.5/go.mod 
h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko v1.3.1 h1:t7K30ceLLYn6k7hQP4oq1c7dVlhgD5nRcuSRDEEnY1s= +github.com/buildkite/roko v1.3.1/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= 
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= +github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= +github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
-github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= -github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd 
v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod 
h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg= 
+github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA= +github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/deislabs/oras v0.8.1 h1:If674KraJVpujYR00rzdi0QAmW4BxzMJPVAZJKuhQ0c= -github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= 
-github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492 h1:FwssHbCDJD025h+BchanCwE1Q8fyMgqDr2mOQAWOLGw= -github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY= -github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= 
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod 
h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/digitorus/pkcs7 
v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.3.3+incompatible 
h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= 
+github.com/emicklei/proto v1.13.4/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= 
-github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fluxcd/pkg/apis/meta v0.7.0 h1:5e8gm4OLqjuKWdrOIY5DEEsjcwzyJFK8rCDesJ+V8IY= -github.com/fluxcd/pkg/apis/meta v0.7.0/go.mod h1:yHuY8kyGHYz22I0jQzqMMGCcHViuzC/WPdo9Gisk8Po= -github.com/fluxcd/pkg/gittestserver v0.1.0 h1:BvIG+bBhgbmqhtpSS2qUpOXRIL1P1Ow2jauloH8X86U= -github.com/fluxcd/pkg/gittestserver v0.1.0/go.mod h1:HWZaoib03fQeSsauCAN2iAFdr6bnjKQ+CFxMFD2mwDY= -github.com/fluxcd/pkg/helmtestserver v0.1.0 h1:RiVVxIHD6PJdKinW46feFIYf1LUj6xXSpgARk+m9U7U= -github.com/fluxcd/pkg/helmtestserver v0.1.0/go.mod h1:3L+tbPn74PsHwHsyhbfk/kZAosrwMFTTA92XEFiwVAE= -github.com/fluxcd/pkg/lockedfile v0.0.5 h1:C3T8wfdff1UY1bvplmCkGOLrdMWJHO8Q8+tdlEXJYzQ= -github.com/fluxcd/pkg/lockedfile v0.0.5/go.mod h1:uAtPUBId6a2RqO84MTH5HKGX0SbM1kNW3Wr/FhYyDVA= -github.com/fluxcd/pkg/runtime v0.8.0 h1:cnSBZJLcXlKgjXpFFFExu+4ZncIxmPgNIx+ErLcCLnA= -github.com/fluxcd/pkg/runtime v0.8.0/go.mod h1:tQwEN+RESjJmtwSSv7I+6bkNM9raIXpGsCjruaIVX6A= -github.com/fluxcd/pkg/ssh v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A= -github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs= -github.com/fluxcd/pkg/testserver v0.0.2 h1:SoaMtO9cE5p/wl2zkGudzflnEHd9mk68CGjZOo7w0Uk= -github.com/fluxcd/pkg/testserver v0.0.2/go.mod h1:pgUZTh9aQ44FSTQo+5NFlh7YMbUfdz1B80DalW7k96Y= -github.com/fluxcd/pkg/untar v0.0.5 h1:UGI3Ch1UIEIaqQvMicmImL1s9npQa64DJ/ozqHKB7gk= -github.com/fluxcd/pkg/untar v0.0.5/go.mod h1:O6V9+rtl8c1mHBafgqFlJN6zkF1HS5SSYn7RpQJ/nfw= 
-github.com/fluxcd/pkg/version v0.0.1 h1:/8asQoDXSThz3csiwi4Qo8Zb6blAxLXbtxNgeMJ9bCg= -github.com/fluxcd/pkg/version v0.0.1/go.mod h1:WAF4FEEA9xyhngF8TDxg3UPu5fA1qhEYV8Pmi2Il01Q= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= 
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fluxcd/cli-utils v0.36.0-flux.15 h1:Et5QLnIpRjj+oZtM9gEybkAaoNsjysHq0y1253Ai94Y= +github.com/fluxcd/cli-utils v0.36.0-flux.15/go.mod h1:AqRUmWIfNE7cdL6NWSGF0bAlypGs+9x5UQ2qOtlEzv4= +github.com/fluxcd/gitkit v0.6.0 h1:iNg5LTx6ePo+Pl0ZwqHTAkhbUHxGVSY3YCxCdw7VIFg= +github.com/fluxcd/gitkit v0.6.0/go.mod h1:svOHuKi0fO9HoawdK4HfHAJJseZDHHjk7I3ihnCIqNo= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/event v0.19.0 h1:ZJU2voontkzp5rNYA4JMOu40S4tRcrWi4Do59EnyFwg= +github.com/fluxcd/pkg/apis/event v0.19.0/go.mod h1:deuIyUb6lh+Z1Ccvwwxhm1wNM3kpSo+vF1IgRnpaZfQ= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fluxcd/pkg/artifact v0.3.0 h1:Mxescx4HOaXJDYhdgecmZwGdnrgPFu/N6sJY9GuTpuo= +github.com/fluxcd/pkg/artifact v0.3.0/go.mod h1:CFtfSBcma+WBkIhjxleaXoCwIjccdkunLO7gv/59xe8= +github.com/fluxcd/pkg/auth v0.31.0 h1:PIwSn7Onq74cGDTocZJZ6P47FxGvbT8NIW7UKFm51rU= +github.com/fluxcd/pkg/auth v0.31.0/go.mod h1:Qxc5OKRMLBwtxO0nf2stm4ZkgzXcrvF6x6BSquiAMW8= +github.com/fluxcd/pkg/cache v0.11.0 h1:fsE8S+una21fSNw4MDXGUIf0Gf1J+pqa4RbsVKf2aTI= +github.com/fluxcd/pkg/cache v0.11.0/go.mod h1:2RTIU6PsJniHmfnllQWFEo7fa5V8KQlnMgn4o0sme40= +github.com/fluxcd/pkg/git v0.36.0 h1:oakFKxTX5yiLcFzCS1SaV+mMXaODaF1Ic6/oCLfIe7I= 
+github.com/fluxcd/pkg/git v0.36.0/go.mod h1:4TgfjcoM3B2sGsO5VbfBSwJQYzNCONGihcTOW8P3Jxw= +github.com/fluxcd/pkg/git/gogit v0.40.0 h1:VCsHC1440jMk1wAGWCwkgU2nDUBOPeYbCk6/OtvbY7Y= +github.com/fluxcd/pkg/git/gogit v0.40.0/go.mod h1:nQVyfa+rYSeVQiwVH5f/C4o1sf2MtMFjMlt3VSkC+P0= +github.com/fluxcd/pkg/gittestserver v0.20.0 h1:xhzLV89mta23ZvTK0cpDCR6ni6vp5Di+9b4v3YBziMQ= +github.com/fluxcd/pkg/gittestserver v0.20.0/go.mod h1:vGmM9eDJk56gx+osTcSHeScefnAaL4czR+rsNsvh0nw= +github.com/fluxcd/pkg/helmtestserver v0.30.0 h1:gEJ6kHei8/SB8J/YemeWaypCxRtfmoejqMxtEOlZRgI= +github.com/fluxcd/pkg/helmtestserver v0.30.0/go.mod h1:xXOkfz7/4z8fz9GJYrYVB9we7bvtmdKKedBeGPHVlhs= +github.com/fluxcd/pkg/http/transport v0.7.0 h1:LbA0qzh1lT6GncWLkN/BjbSMrN8bdFtaa2TqxiIdyzs= +github.com/fluxcd/pkg/http/transport v0.7.0/go.mod h1:G3ptGZKlY0PJZsvWCwzV9vKQ90yfP/mKT2/ZdAud9LE= +github.com/fluxcd/pkg/lockedfile v0.7.0 h1:tmzW2GeMGuJMiCcVloXVd1vKZ92anm9WGkRgOBpWfRk= +github.com/fluxcd/pkg/lockedfile v0.7.0/go.mod h1:AzCV/h1N3hi/KtUDUCUgS8hl1+a1y+I6pmRo25dxdK0= +github.com/fluxcd/pkg/masktoken v0.8.0 h1:Dm5xIVNbg0s6zNttjDvimaG38bKsXwxBVo5b+D7ThVU= +github.com/fluxcd/pkg/masktoken v0.8.0/go.mod h1:Gc73ALOqIe+5Gj2V3JggMNiYcBiZ9bNNDYBE9R5XTTg= +github.com/fluxcd/pkg/oci v0.56.0 h1:t/jnHpizC+j7Gugw8y14HdfHnhLIgmxR3yNdArghUrM= +github.com/fluxcd/pkg/oci v0.56.0/go.mod h1:WZxMYYWfugc4rtnq2zHUIHxH0+e6IRhP9EDq+mW/Z2w= +github.com/fluxcd/pkg/runtime v0.84.0 h1:3M+egIQwQU9YYjKQkczyawG+9RUOkGtkDMQlePnEeTM= +github.com/fluxcd/pkg/runtime v0.84.0/go.mod h1:Wt9mUzQgMPQMu2D/wKl5pG4zh5vu/tfF5wq9pPobxOQ= +github.com/fluxcd/pkg/sourceignore v0.14.0 h1:ZiZzbXtXb/Qp7I7JCStsxOlX8ri8rWwCvmvIrJ0UzQQ= +github.com/fluxcd/pkg/sourceignore v0.14.0/go.mod 
h1:E3zKvyTyB+oQKqm/2I/jS6Rrt3B7fNuig/4bY2vi3bg= +github.com/fluxcd/pkg/ssh v0.21.0 h1:ZmyF0n9je0cTTkOpvFVgIhmdx9qtswnVE60TK4IzJh0= +github.com/fluxcd/pkg/ssh v0.21.0/go.mod h1:nX+gvJOmjf0E7lxq5mKKzDIdPEL2jOUQZbkBMS+mDtk= +github.com/fluxcd/pkg/tar v0.14.0 h1:9Gku8FIvPt2bixKldZnzXJ/t+7SloxePlzyVGOK8GVQ= +github.com/fluxcd/pkg/tar v0.14.0/go.mod h1:+rOWYk93qLEJ8WwmkvJOkB8i0dna1mrwJFybE8i9Udo= +github.com/fluxcd/pkg/testserver v0.13.0 h1:xEpBcEYtD7bwvZ+i0ZmChxKkDo/wfQEV3xmnzVybSSg= +github.com/fluxcd/pkg/testserver v0.13.0/go.mod h1:akRYv3FLQUsme15na9ihECRG6hBuqni4XEY9W8kzs8E= +github.com/fluxcd/pkg/version v0.10.0 h1:WETlCRbfbocsDItkCCeh/4x4zQkZ5i/lUe7P7VaQBrI= +github.com/fluxcd/pkg/version v0.10.0/go.mod h1:dgmjEq4ykvBnqK1oVXM+hcXx3kAY/b4uZDYUn8XnHjk= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/ghodss/yaml 
v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= -github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk= +github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= 
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU= +github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= 
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate 
v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 
h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-piv/piv-go/v2 v2.3.0 h1:kKkrYlgLQTMPA6BiSL25A7/x4CEh2YCG7rtb/aTkx+g= +github.com/go-piv/piv-go/v2 v2.3.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8= -github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/logger v1.0.1 h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg= -github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr/v2 v2.7.1 h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o= -github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod 
h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/gogo/protobuf 
v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 
h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= 
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 h1:1d9SJvpHXjFuYBHAS5576memil93kLpgBZ5OjdtvW4I= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039/go.mod h1:AlUTqI/YtH9ckkhLo4ClTAccEOZz8EaLVxqrfv56OFg= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f h1:GJRzEBoJv/A/E7JbTekq1Q0jFtAfY7TIxUFAK89Mmic= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f/go.mod h1:ZT74/OE6eosKneM9/LQItNxIMBV6CI5S46EXAnvkTBI= +github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= +github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid 
v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33 h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= 
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway/v2 
v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version 
v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= 
+github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4= +github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= +github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod 
h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 
v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/keybase/go-keychain v0.0.1 
h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= -github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= 
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= -github.com/lib/pq v1.9.0/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libgit2/git2go/v31 v31.3.0 h1:d8ciyYVKir+gKwra3KuNxTyVvbgGKn4admdt1PNNAOg= -github.com/libgit2/git2go/v31 v31.3.0/go.mod h1:mnc0hPGPs0nDi9INrurTpioeRzje9DvSXqON/+JEhwY= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.12.0 h1:u/x3mp++qUxvYfulZ4HKOvVO0JWhk7HtE8lWhbGz/Do= -github.com/mattn/go-sqlite3 v1.12.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.5 
h1:I2NIJ2ojwJqD/YByemC1M59e1b4FW9kS7NlOar7HPV4= -github.com/minio/minio-go/v7 v7.0.5/go.mod h1:TA0CQCjJZHM5SJj9IjqR0NmpmQJ6bCbXifAJ3mUU6Hw= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= +github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= +github.com/minio/minio-go/v7 v7.0.94/go.mod 
h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= 
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= +github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f 
h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/notaryproject/notation-core-go v1.3.0 h1:mWJaw1QBpBxpjLSiKOjzbZvB+xh2Abzk14FHWQ+9Kfs= +github.com/notaryproject/notation-core-go v1.3.0/go.mod h1:hzvEOit5lXfNATGNBT8UQRx2J6Fiw/dq/78TQL8aE64= +github.com/notaryproject/notation-go v1.3.2 h1:4223iLXOHhEV7ZPzIUJEwwMkhlgzoYFCsMJvSH1Chb8= +github.com/notaryproject/notation-go v1.3.2/go.mod h1:/1kuq5WuLF6Gaer5re0Z6HlkQRlKYO4EbWWT/L7J1Uw= +github.com/notaryproject/notation-plugin-framework-go v1.0.0 h1:6Qzr7DGXoCgXEQN+1gTZWuJAZvxh3p8Lryjn5FaLzi4= +github.com/notaryproject/notation-plugin-framework-go 
v1.0.0/go.mod h1:RqWSrTOtEASCrGOEffq0n8pSg2KOgKYiWqFWczRSics= +github.com/notaryproject/tspclient-go v1.0.0 h1:AwQ4x0gX8IHnyiZB1tggpn5NFqHpTEm1SDX8YNv4Dg4= +github.com/notaryproject/tspclient-go v1.0.0/go.mod h1:LGyA/6Kwd2FlM0uk8Vc5il3j0CddbWSHBj/4kxQDbjs= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= +github.com/oleiade/reflections v1.1.0/go.mod 
h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= 
-github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= 
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/open-policy-agent/opa v1.5.1 h1:LTxxBJusMVjfs67W4FoRcnMfXADIGFMzpqnfk6D08Cg= +github.com/open-policy-agent/opa v1.5.1/go.mod h1:bYbS7u+uhTI+cxHQIpzvr5hxX0hV7urWtY+38ZtjMgk= 
+github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be h1:f2PlhC9pm5sqpBZFvnAoKj+KzXRzbjFMA+TqXfJdgho= +github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a h1:IAncDmJeD90l6+YR1Gf6r0HrmnRmOatzPfUpMS80ZTI= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.4 h1:yWFgLkghp71D76Fa0l349yAl5g4Gse7DPYNlvkQ9Eiw= +github.com/opencontainers/runc v1.2.4/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= +github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod 
h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= +github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model 
v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY= -github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= 
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk= -github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d h1:HWfigq7lB31IeJL8iy7jkUmU/PG1Sr8jVGhS749dbUA= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 
h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sigstore/cosign/v2 v2.5.2 h1:i5Dw7M7W9OcWgyiknJB8vNx/07KweninBDxRoHPxqHE= +github.com/sigstore/cosign/v2 v2.5.2/go.mod h1:CYlcgkPQJZ5pvWlbl7mOfO/Q1S1N7r4tpdYCtFwhXco= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= +github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= 
+github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= +github.com/sigstore/sigstore-go v1.0.0 h1:4N07S2zLxf09nTRwaPKyAxbKzpM8WJYUS8lWWaYxneU= +github.com/sigstore/sigstore-go v1.0.0/go.mod h1:UYsZ/XHE4eltv1o1Lu+n6poW1Z5to3f0+emvfXNxIN8= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 h1:qp2VFyKuFQvTGmZwk5Q7m5nE4NwnF9tHwkyz0gtWAck= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5/go.mod h1:DKlQjjr+GsWljEYPycI0Sf8URLCk4EbGA9qYjF47j4g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 h1:CRZcdYn5AOptStsLRAAACudAVmb1qUbhMlzrvm7ju3o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5/go.mod h1:b9rFfITq2fp1M3oJmq6lFFhSrAz5vOEJH1qzbMsZWN4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 h1:7U0GsO0UGG1PdtgS6wBkRC0sMgq7BRVaFlPRwN4m1Qg= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5/go.mod h1:/2qrI0nnCy/DTIPOMFaZlFnNPWEn5UeS70P37XEM88o= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 h1:S2ukEfN1orLKw2wEQIUHDDlzk0YcylhcheeZ5TGk8LI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5/go.mod h1:m7sQxVJmDa+rsmS1m6biQxaLX83pzNS7ThUEyjOqkCU= +github.com/sigstore/timestamp-authority v1.2.8 h1:BEV3fkphwU4zBp3allFAhCqQb99HkiyCXB853RIwuEE= +github.com/sigstore/timestamp-authority v1.2.8/go.mod h1:G2/0hAZmLPnevEwT1S9IvtNHUm9Ktzvso6xuRhl94ZY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d h1:QKK1cJOPfb6nDDB8fC1l41/IcezASje2lsA13diVqfM= -github.com/sosedoff/gitkit 
v0.2.1-0.20200818155723-72ebbcf5056d/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 
h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= 
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.1.1 h1:OWcoHItwsGO+7m0wLa7FDWPR4oB1cj0zOr1kosE4G+I= +github.com/theupdateframework/go-tuf/v2 v2.1.1/go.mod h1:V675cQGhZONR0OGQ8r1feO0uwtsTBYPDWHzAAPn5rjE= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 
h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.4.0 h1:8VPZeZI4EeZ8P/vB6SIkhlStrJfivTJn+cQ4dtyHNh0= +github.com/tink-crypto/tink-go/v2 v2.4.0/go.mod h1:l//evrF2Y3MjdbpNDNGnKgCpo5zSmvUvnQ4MU+yE2sw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.26 h1:REqqFkO8+SOEgZHR/eHScjjVjGS8Nk3RMO/juiTobN4= +github.com/vektah/gqlparser/v2 v2.5.26/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= 
+github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod 
h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= -github.com/yvasiyarov/gorelic 
v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= 
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= +github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +gitlab.com/gitlab-org/api/client-go v0.130.1 h1:1xF5C5Zq3sFeNg3PzS2z63oqrxifne3n/OnbI7nptRc= +gitlab.com/gitlab-org/api/client-go v0.130.1/go.mod h1:ZhSxLAWadqP6J9lMh40IAZOlOxBLPRh7yFOXR/bMJWM= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= 
+go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod 
h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod 
h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.step.sm/crypto v0.66.0 h1:9TW6BEguOtcS9NIjja9bDQ+j8OjhenU/F6lJfHjbXNU= +go.step.sm/crypto v0.66.0/go.mod h1:anqGyvO/Px05D1mznHq4/a9wwP1I1DmMZvk+TWX5Dzo= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 
h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod 
h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp 
v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api 
v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y= +google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= 
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.75.0 
h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= -gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible 
h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -helm.sh/helm/v3 v3.4.2 h1:ML8oFGsLQ36rawntKLFW1l/n8pI/bPB3c8947eQmDWo= -helm.sh/helm/v3 v3.4.2/go.mod h1:O4USJi4CwjSHEPPYmw2NpA1omXiaKu8ePA3cbxk66RQ= -helm.sh/helm/v3 v3.5.0 h1:uqIT3Bh4hVEyZRThyTPik8FkiABj3VJIY+POvDFT3a4= -helm.sh/helm/v3 v3.5.0/go.mod h1:bjwXfmGAF+SEuJZ2AtN1xmTuz4FqaNYOJrXP+vtj6Tw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= -k8s.io/api v0.19.4/go.mod 
h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/apiextensions-apiserver v0.19.4 h1:D9ak9T012tb3vcGFWYmbQuj9SCC8YM4zhA4XZqsAQC4= -k8s.io/apiextensions-apiserver v0.19.4/go.mod h1:B9rpH/nu4JBCtuUp3zTTk8DEjZUupZTBEec7/2zNRYw= -k8s.io/apiextensions-apiserver v0.20.1 h1:ZrXQeslal+6zKM/HjDXLzThlz/vPSxrfK3OqL8txgVQ= -k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= -k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= -k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apiserver v0.19.4/go.mod h1:X8WRHCR1UGZDd7HpV0QDc1h/6VbbpAeAGyxSh8yzZXw= -k8s.io/apiserver v0.20.1 h1:yEqdkxlnQbxi/3e74cp0X16h140fpvPrNnNRAJBDuBk= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/cli-runtime v0.19.4 h1:FPpoqFbWsFzRbZNRI+o/+iiLFmWMYTmBueIj3OaNVTI= -k8s.io/cli-runtime v0.19.4/go.mod h1:m8G32dVbKOeaX1foGhleLEvNd6REvU7YnZyWn5//9rw= -k8s.io/cli-runtime v0.20.1 h1:fJhRQ9EfTpJpCqSFOAqnYLuu5aAM7yyORWZ26qW1jJc= -k8s.io/cli-runtime v0.20.1/go.mod h1:6wkMM16ZXTi7Ow3JLYPe10bS+XBnIkL6V9dmEz0mbuY= -k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= -k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ= -k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= 
-k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.19.4 h1:HobPRToQ8KJ9ubRju6PUAk9I5V1GNMJZ4PyWbiWA0uI= -k8s.io/component-base v0.19.4/go.mod h1:ZzuSLlsWhajIDEkKF73j64Gz/5o0AgON08FgRbEPI70= -k8s.io/component-base v0.20.1 h1:6OQaHr205NSl24t5wOF2IhdrlxZTWEZwuGlLvBgaeIg= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-helpers v0.20.1/go.mod h1:Q8trCj1zyLNdeur6pD2QvsF8d/nWVfK71YjN5+qVXy4= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kubectl v0.19.4 h1:XFrHibf5fS4Ot8h3EnzdVsKrYj+pndlzKbwPkfra5hI= -k8s.io/kubectl v0.19.4/go.mod h1:XPmlu4DJEYgD83pvZFeKF8+MSvGnYGqunbFSrJsqHv0= -k8s.io/kubectl v0.20.1 h1:7h1vSrL/B3hLrhlCJhbTADElPKDbx+oVUt3+QDSXxBo= -k8s.io/kubectl v0.20.1/go.mod 
h1:2bE0JLYTRDVKDiTREFsjLAx4R2GvUtL/mGYFXfFFMzY= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.19.4/go.mod h1:a0gvAzrxQPw2ouBqnXI7X9qlggpPkKAFgWU/Py+KZiU= -k8s.io/metrics v0.20.1/go.mod h1:JhpBE/fad3yRGsgEpiZz5FQQM5wJ18OTLkD7Tv40c0s= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= -rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.8.0 h1:s0dYdo7lQgJiAf+alP82PRwbz+oAqL3oSyMQ18XRDOc= -sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= -sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 
h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/release-utils v0.11.1 h1:hzvXGpHgHJfLOJB6TRuu14bzWc3XEglHmXHJqwClSZE= +sigs.k8s.io/release-utils v0.11.1/go.mod h1:ybR2V/uQAOGxYfzYtBenSYeXWkBGNP2qnEiX77ACtpc= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 
h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/hack/api-docs/config.json b/hack/api-docs/config.json index 45c968401..ea8b2b9a5 100644 --- a/hack/api-docs/config.json +++ b/hack/api-docs/config.json @@ -9,11 +9,11 @@ "externalPackages": [ { "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" }, { "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Condition$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition" + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition" }, { "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", @@ -21,7 +21,15 @@ }, { "typeMatchPrefix": "^github.com/fluxcd/pkg/apis/meta", - "docsURLTemplate": "https://godoc.org/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}" + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}" + }, + { + "typeMatchPrefix": "^github.com/fluxcd/pkg/apis/acl", + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#{{ .TypeIdentifier }}" + }, + { + "typeMatchPrefix": "^github.com/fluxcd/source-controller/api/v1", + "docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#{{ .TypeIdentifier }}" } ], "typeDisplayNamePrefixOverrides": { diff --git a/hack/api-docs/template/pkg.tpl b/hack/api-docs/template/pkg.tpl index f2b3140f2..0cb681a67 100644 --- a/hack/api-docs/template/pkg.tpl +++ b/hack/api-docs/template/pkg.tpl @@ -1,5 +1,10 @@ {{ define "packages" }} -

Source API reference

+

Source API reference + {{- with (index .packages 0) -}} + {{ with (index .GoPackages 0 ) -}} + {{ printf " %s" .Name -}} + {{ end -}} + {{ end }}

{{ with .packages}}

Packages:

diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index ae5141121..f186b9dd3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/hack/ci/e2e.sh b/hack/ci/e2e.sh new file mode 100755 index 000000000..ba7c4a6c1 --- /dev/null +++ b/hack/ci/e2e.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +set -eoux pipefail + +CREATE_CLUSTER="${CREATE_CLUSTER:-true}" +KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}" +LOAD_IMG_INTO_KIND="${LOAD_IMG_INTO_KIND:-true}" +BUILD_PLATFORM="${BUILD_PLATFORM:-linux/amd64}" + +IMG=test/source-controller +TAG=latest + +ROOT_DIR="$(git rev-parse --show-toplevel)" +BUILD_DIR="${ROOT_DIR}/build" + +if "${CREATE_CLUSTER}"; then + KIND_CLUSTER_NAME="flux-${RANDOM}" + export KUBECONFIG="${ROOT_DIR}/build/kindconfig" + + echo "Spinning up flux kind cluster" + kind create cluster --name "${KIND_CLUSTER_NAME}" --kubeconfig "${KUBECONFIG}" +fi + +function cleanup(){ + EXIT_CODE="$?" + + # only dump all logs if an error has occurred + if [ ${EXIT_CODE} -ne 0 ]; then + kubectl -n kube-system describe pods + kubectl -n source-system describe pods + kubectl -n source-system get gitrepositories -oyaml + kubectl -n source-system get ocirepositories -oyaml + kubectl -n source-system get helmrepositories -oyaml + kubectl -n source-system get helmcharts -oyaml + kubectl -n source-system get all + kubectl -n source-system logs deploy/source-controller + else + echo "All E2E tests passed!" 
+ fi + + if "${CREATE_CLUSTER}"; then + echo "Delete cluster" + kind delete cluster --name "${KIND_CLUSTER_NAME}" + fi + exit ${EXIT_CODE} +} +trap cleanup EXIT + +# Wait for nodes to be ready and pods to be running +kubectl wait node "${KIND_CLUSTER_NAME}-control-plane" --for=condition=ready --timeout=2m +kubectl wait --for=condition=ready -n kube-system -l k8s-app=kube-dns pod +kubectl wait --for=condition=ready -n local-path-storage -l app=local-path-provisioner pod + +echo "Build, load image into kind and deploy controller" +make docker-build IMG="${IMG}" TAG="${TAG}" BUILD_PLATFORMS="${BUILD_PLATFORM}" BUILD_ARGS=--load + +if "${LOAD_IMG_INTO_KIND}"; then + kind load docker-image --name "${KIND_CLUSTER_NAME}" "${IMG}":"${TAG}" +fi + +make dev-deploy IMG="${IMG}" TAG="${TAG}" + +echo "Run smoke tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/samples" +kubectl -n source-system rollout status deploy/source-controller --timeout=1m +kubectl -n source-system wait gitrepository/gitrepository-sample --for=condition=ready --timeout=1m +kubectl -n source-system wait ocirepository/ocirepository-sample --for=condition=ready --timeout=1m +kubectl -n source-system wait helmrepository/helmrepository-sample --for=condition=ready --timeout=1m +kubectl -n source-system wait helmchart/helmchart-sample --for=condition=ready --timeout=1m +kubectl -n source-system wait helmchart/helmchart-sample-oci --for=condition=ready --timeout=1m +kubectl -n source-system delete -f "${ROOT_DIR}/config/samples" + +echo "Run HelmChart values file tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-valuesfile" +kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=5m +kubectl -n source-system wait helmchart/podinfo-git --for=condition=ready --timeout=5m +kubectl -n source-system delete -f "${ROOT_DIR}/config/testdata/helmchart-valuesfile" + +echo "Run large Git repo tests" +kubectl -n source-system apply -f 
"${ROOT_DIR}/config/testdata/git/large-repo.yaml" +kubectl -n source-system wait gitrepository/large-repo --for=condition=ready --timeout=2m15s + +echo "Run HelmChart from OCI registry tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/source.yaml" +kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=1m +kubectl -n source-system wait helmchart/podinfo-keyless --for=condition=ready --timeout=1m + +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/notation.yaml" +curl -sSLo notation.crt https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/notation.crt +curl -sSLo trustpolicy.json https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/trustpolicy.json +kubectl -n source-system create secret generic notation-config --from-file=notation.crt --from-file=trustpolicy.json --dry-run=client -o yaml | kubectl apply -f - +kubectl -n source-system wait helmchart/podinfo-notation --for=condition=ready --timeout=1m + +echo "Run OCIRepository verify tests" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-key.yaml" +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-keyless.yaml" +curl -sSLo cosign.pub https://raw.githubusercontent.com/stefanprodan/podinfo/master/.cosign/cosign.pub +kubectl -n source-system create secret generic cosign-key --from-file=cosign.pub --dry-run=client -o yaml | kubectl apply -f - + +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-key --for=condition=ready --timeout=1m +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-keyless --for=condition=ready --timeout=1m + +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-notation.yaml" +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-notation --for=condition=ready --timeout=1m diff --git 
a/internal/bucket/azure/blob.go b/internal/bucket/azure/blob.go new file mode 100644 index 000000000..5bf814b7d --- /dev/null +++ b/internal/bucket/azure/blob.go @@ -0,0 +1,523 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "context" + "crypto/md5" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/auth" + azureauth "github.com/fluxcd/pkg/auth/azure" + "github.com/fluxcd/pkg/masktoken" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +var ( + // ErrorDirectoryExists is an error returned when the filename provided + // is a directory. 
+ ErrorDirectoryExists = errors.New("filename is a directory") +) + +const ( + clientIDField = "clientId" + tenantIDField = "tenantId" + clientSecretField = "clientSecret" + clientCertificateField = "clientCertificate" + clientCertificatePasswordField = "clientCertificatePassword" + clientCertificateSendChainField = "clientCertificateSendChain" + authorityHostField = "authorityHost" + accountKeyField = "accountKey" + sasKeyField = "sasKey" +) + +// BlobClient is a minimal Azure Blob client for fetching objects. +type BlobClient struct { + *azblob.Client +} + +// Option configures the BlobClient. +type Option func(*options) + +// WithSecret sets the Secret to use for the BlobClient. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithProxyURL sets the proxy URL to use for the BlobClient. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +type options struct { + secret *corev1.Secret + proxyURL *url.URL + withoutCredentials bool + withoutRetries bool + authOpts []auth.Option +} + +// withoutCredentials forces the BlobClient to not use any credentials. +// This is a test-only option useful for testing the client with HTTP +// endpoints (without TLS) alongside all the other options unrelated to +// credentials. +func withoutCredentials() Option { + return func(o *options) { + o.withoutCredentials = true + } +} + +// withoutRetries sets the BlobClient to not retry requests. +// This is a test-only option useful for testing connection errors. +func withoutRetries() Option { + return func(o *options) { + o.withoutRetries = true + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + +// NewClient creates a new Azure Blob storage client. 
+// The credential config on the client is set based on the data from the +// Bucket and Secret. It detects credentials in the Secret in the following +// order: +// +// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and +// `clientSecret` fields are found. +// - azidentity.ClientCertificateCredential when `tenantId`, +// `clientCertificate` (and optionally `clientCertificatePassword`) fields +// are found. +// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId` +// field but no `tenantId` is found. +// - azidentity.WorkloadIdentityCredential for when environment variables +// (AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID) +// are set by the Azure workload identity webhook. +// - azblob.SharedKeyCredential when an `accountKey` field is found. +// The account name is extracted from the endpoint specified on the Bucket +// object. +// - azidentity.ChainedTokenCredential with azidentity.EnvironmentCredential +// and azidentity.ManagedIdentityCredential. +// +// If no credentials are found, and the azidentity.ChainedTokenCredential can +// not be established. A simple client without credentials is returned. 
+func NewClient(ctx context.Context, obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) { + c = &BlobClient{} + + var o options + for _, opt := range opts { + opt(&o) + } + + clientOpts := &azblob.ClientOptions{} + + if o.proxyURL != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(o.proxyURL) + clientOpts.ClientOptions.Transport = &http.Client{Transport: transport} + } + + if o.withoutRetries { + clientOpts.ClientOptions.Retry.ShouldRetry = func(resp *http.Response, err error) bool { + return false + } + } + + if o.withoutCredentials { + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) + return + } + + var token azcore.TokenCredential + + if o.secret != nil && len(o.secret.Data) > 0 { + // Attempt AAD Token Credential options first. + if token, err = tokenCredentialFromSecret(o.secret); err != nil { + err = fmt.Errorf("failed to create token credential from '%s' Secret: %w", o.secret.Name, err) + return + } + if token != nil { + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) + return + } + + // Fallback to Shared Key Credential. + var cred *azblob.SharedKeyCredential + if cred, err = sharedCredentialFromSecret(obj.Spec.Endpoint, o.secret); err != nil { + return + } + if cred != nil { + c.Client, err = azblob.NewClientWithSharedKeyCredential(obj.Spec.Endpoint, cred, clientOpts) + return + } + + var fullPath string + if fullPath, err = sasTokenFromSecret(obj.Spec.Endpoint, o.secret); err != nil { + return + } + + c.Client, err = azblob.NewClientWithNoCredential(fullPath, clientOpts) + return + } + + // Compose token chain based on environment. + // This functions as a replacement for azidentity.NewDefaultAzureCredential + // to not shell out. + token, err = chainCredentialWithSecret(ctx, o.secret, o.authOpts...) 
+ if err != nil { + err = fmt.Errorf("failed to create environment credential chain: %w", err) + return nil, err + } + if token != nil { + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) + return + } + + // Fallback to simple client. + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) + return +} + +// ValidateSecret validates if the provided Secret does at least have one valid +// set of credentials. The provided Secret may be nil. +func ValidateSecret(secret *corev1.Secret) error { + if secret == nil { + return nil + } + + var valid bool + if _, hasTenantID := secret.Data[tenantIDField]; hasTenantID { + if _, hasClientID := secret.Data[clientIDField]; hasClientID { + if _, hasClientSecret := secret.Data[clientSecretField]; hasClientSecret { + valid = true + } + if _, hasClientCertificate := secret.Data[clientCertificateField]; hasClientCertificate { + valid = true + } + } + } + if _, hasClientID := secret.Data[clientIDField]; hasClientID { + valid = true + } + if _, hasAccountKey := secret.Data[accountKeyField]; hasAccountKey { + valid = true + } + if _, hasSasKey := secret.Data[sasKeyField]; hasSasKey { + valid = true + } + if _, hasAuthorityHost := secret.Data[authorityHostField]; hasAuthorityHost { + valid = true + } + + if !valid { + return fmt.Errorf("invalid '%s' secret data: requires a '%s' or '%s' field, a combination of '%s', '%s' and '%s', or '%s', '%s' and '%s'", + secret.Name, clientIDField, accountKeyField, tenantIDField, clientIDField, clientSecretField, tenantIDField, clientIDField, clientCertificateField) + } + return nil +} + +// BucketExists returns if an object storage bucket with the provided name +// exists, or returns a (client) error. 
+func (c *BlobClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { + items := c.Client.NewListBlobsFlatPager(bucketName, &azblob.ListBlobsFlatOptions{ + MaxResults: to.Ptr(int32(1)), + }) + // We call next page only once since we just want to see if we get an error + if _, err := items.NextPage(ctx); err != nil { + if bloberror.HasCode(err, bloberror.ContainerNotFound) { + return false, nil + } + + // For a container-level SASToken, we get an AuthenticationFailed when the bucket doesn't exist + if bloberror.HasCode(err, bloberror.AuthenticationFailed) { + return false, fmt.Errorf("the specified bucket name may be incorrect, nonexistent, or the caller might lack sufficient permissions to access it: %w", err) + } + + return false, err + } + return true, nil +} + +// FGetObject gets the object from the provided object storage bucket, and +// writes it to targetPath. +// It returns the etag of the successfully fetched file, or any error. +func (c *BlobClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { + // Verify if destination already exists. + dirStatus, err := os.Stat(localPath) + if err == nil { + // If the destination exists and is a directory. + if dirStatus.IsDir() { + return "", ErrorDirectoryExists + } + } + + // Proceed if file does not exist, return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(localPath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0o700); err != nil { + return "", err + } + } + + // Download object. + res, err := c.DownloadStream(ctx, bucketName, objectName, nil) + if err != nil { + return "", err + } + + // Prepare target file. + f, err := os.OpenFile(localPath, os.O_CREATE|os.O_WRONLY, 0o600) + if err != nil { + return "", err + } + + // Calculate hash during write. 
+ // NOTE: not actively used at present, as MD5 is not consistently returned + // by API. + hash := md5.New() + + // Off we go. + mw := io.MultiWriter(f, hash) + if _, err = io.Copy(mw, res.Body); err != nil { + if err = f.Close(); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to close file after copy error") + } + return "", err + } + if err = f.Close(); err != nil { + return "", err + } + + return string(*res.ETag), nil +} + +// VisitObjects iterates over the items in the provided object storage +// bucket, calling visit for every item. +// If the underlying client or the visit callback returns an error, +// it returns early. +func (c *BlobClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(path, etag string) error) error { + items := c.NewListBlobsFlatPager(bucketName, nil) + for items.More() { + resp, err := items.NextPage(ctx) + if err != nil { + err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, err) + return err + } + for _, blob := range resp.Segment.BlobItems { + if err := visit(*blob.Name, fmt.Sprintf("%x", *blob.Properties.ETag)); err != nil { + err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, err) + return err + } + } + } + + return nil +} + +// Close has no effect on BlobClient. +func (c *BlobClient) Close(_ context.Context) {} + +// ObjectIsNotFound checks if the error provided is an azblob.StorageError with +// an azblob.StorageErrorCodeBlobNotFound error code. +func (c *BlobClient) ObjectIsNotFound(err error) bool { + return bloberror.HasCode(err, bloberror.BlobNotFound) +} + +// tokenCredentialsFromSecret attempts to create an azcore.TokenCredential +// based on the data fields of the given Secret. It returns, in order: +// - azidentity.ClientSecretCredential when `tenantId`, `clientId` and +// `clientSecret` fields are found. 
+// - azidentity.ClientCertificateCredential when `tenantId`, +// `clientCertificate` (and optionally `clientCertificatePassword`) fields +// are found. +// - azidentity.ManagedIdentityCredential for a User ID, when a `clientId` +// field but no `tenantId` is found. +// - Nil, if no valid set of credential fields was found. +func tokenCredentialFromSecret(secret *corev1.Secret) (azcore.TokenCredential, error) { + if secret == nil { + return nil, nil + } + + clientID, hasClientID := secret.Data[clientIDField] + if tenantID, hasTenantID := secret.Data[tenantIDField]; hasTenantID && hasClientID { + if clientSecret, hasClientSecret := secret.Data[clientSecretField]; hasClientSecret && len(clientSecret) > 0 { + opts := &azidentity.ClientSecretCredentialOptions{} + if authorityHost, hasAuthorityHost := secret.Data[authorityHostField]; hasAuthorityHost { + opts.Cloud = cloud.Configuration{ActiveDirectoryAuthorityHost: string(authorityHost)} + } + return azidentity.NewClientSecretCredential(string(tenantID), string(clientID), string(clientSecret), opts) + } + if clientCertificate, hasClientCertificate := secret.Data[clientCertificateField]; hasClientCertificate && len(clientCertificate) > 0 { + certs, key, err := azidentity.ParseCertificates(clientCertificate, secret.Data[clientCertificatePasswordField]) + if err != nil { + return nil, fmt.Errorf("failed to parse client certificates: %w", err) + } + opts := &azidentity.ClientCertificateCredentialOptions{} + if authorityHost, hasAuthorityHost := secret.Data[authorityHostField]; hasAuthorityHost { + opts.Cloud = cloud.Configuration{ActiveDirectoryAuthorityHost: string(authorityHost)} + } + if v, sendChain := secret.Data[clientCertificateSendChainField]; sendChain { + opts.SendCertificateChain = string(v) == "1" || strings.ToLower(string(v)) == "true" + } + return azidentity.NewClientCertificateCredential(string(tenantID), string(clientID), certs, key, opts) + } + } + if hasClientID { + return 
azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ + ID: azidentity.ClientID(clientID), + }) + } + return nil, nil +} + +// sharedCredentialFromSecret attempts to create an azblob.SharedKeyCredential +// based on the data fields of the given Secret. It returns nil if the Secret +// does not contain a valid set of credentials. +func sharedCredentialFromSecret(endpoint string, secret *corev1.Secret) (*azblob.SharedKeyCredential, error) { + if accountKey, hasAccountKey := secret.Data[accountKeyField]; hasAccountKey { + accountName, err := extractAccountNameFromEndpoint(endpoint) + if err != nil { + return nil, fmt.Errorf("failed to create shared credential from '%s' Secret data: %w", secret.Name, err) + } + return azblob.NewSharedKeyCredential(accountName, string(accountKey)) + } + return nil, nil +} + +// sasTokenFromSecret retrieves the SAS Token from the `sasKey`. It returns an empty string if the Secret +// does not contain a valid set of credentials. 
+func sasTokenFromSecret(ep string, secret *corev1.Secret) (string, error) { + if sasKey, hasSASKey := secret.Data[sasKeyField]; hasSASKey { + queryString := strings.TrimPrefix(string(sasKey), "?") + values, err := url.ParseQuery(queryString) + if err != nil { + maskedErrorString, maskErr := masktoken.MaskTokenFromString(err.Error(), string(sasKey)) + if maskErr != nil { + return "", fmt.Errorf("error redacting token from error message: %s", maskErr) + } + return "", fmt.Errorf("unable to parse SAS token: %s", maskedErrorString) + } + + epURL, err := url.Parse(ep) + if err != nil { + return "", fmt.Errorf("unable to parse endpoint URL: %s", err) + } + + // merge the query values in the endpoint with the token + epValues := epURL.Query() + for key, val := range epValues { + if !values.Has(key) { + for _, str := range val { + values.Add(key, str) + } + } + } + + epURL.RawQuery = values.Encode() + return epURL.String(), nil + } + return "", nil +} + +// chainCredentialWithSecret tries to create a set of tokens, and returns an +// azidentity.ChainedTokenCredential if at least one of the following tokens was +// successfully created: +// +// - azidentity.EnvironmentCredential with `authorityHost` from Secret, if +// provided. +// - azidentity.WorkloadIdentityCredential with Client ID from AZURE_CLIENT_ID plus +// AZURE_TENANT_ID, AZURE_FEDERATED_TOKEN_FILE environment variables, if found. +// - azidentity.ManagedIdentityCredential with Client ID from AZURE_CLIENT_ID +// environment variable, if found. +// - azidentity.ManagedIdentityCredential with defaults. +// +// If no valid token is created, it returns nil. 
+func chainCredentialWithSecret(ctx context.Context, secret *corev1.Secret, opts ...auth.Option) (azcore.TokenCredential, error) { + var creds []azcore.TokenCredential + + credOpts := &azidentity.EnvironmentCredentialOptions{} + if secret != nil { + if authorityHost, hasAuthorityHost := secret.Data[authorityHostField]; hasAuthorityHost { + credOpts.Cloud = cloud.Configuration{ActiveDirectoryAuthorityHost: string(authorityHost)} + } + } + + if token, _ := azidentity.NewEnvironmentCredential(credOpts); token != nil { + creds = append(creds, token) + } + if token := azureauth.NewTokenCredential(ctx, opts...); token != nil { + creds = append(creds, token) + } + + if len(creds) > 0 { + return azidentity.NewChainedTokenCredential(creds, nil) + } + + return nil, nil +} + +// extractAccountNameFromEndpoint extracts the Azure account name from the +// provided endpoint URL. It parses the endpoint as a URL, and returns the +// first subdomain as the assumed account name. +// It returns an error when it fails to parse the endpoint as a URL, or if it +// does not have any subdomains. +func extractAccountNameFromEndpoint(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", fmt.Errorf("failed to extract account name from endpoint: %w", err) + } + hostname := u.Hostname() + parts := strings.Split(hostname, ".") + if len(parts) <= 2 { + return "", fmt.Errorf("failed to extract account name from endpoint: expected '%s' to be a subdomain", hostname) + } + return parts[0], nil +} diff --git a/internal/bucket/azure/blob_integration_test.go b/internal/bucket/azure/blob_integration_test.go new file mode 100644 index 000000000..704b4c0c3 --- /dev/null +++ b/internal/bucket/azure/blob_integration_test.go @@ -0,0 +1,459 @@ +//go:build integration + +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "context" + "crypto/md5" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +var ( + testTimeout = time.Second * 10 +) + +var ( + testAccountName = os.Getenv("TEST_AZURE_ACCOUNT_NAME") + testAccountKey = os.Getenv("TEST_AZURE_ACCOUNT_KEY") + cred *azblob.SharedKeyCredential +) + +var ( + testContainerGenerateName = "azure-client-test-" + testFile = "test.yaml" + testFileData = ` +--- +test: file +` + testFile2 = "test2.yaml" + testFile2Data = ` +--- +test: file2 +` + testBucket = sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + Endpoint: endpointURL(testAccountName), + }, + } + testSecret = corev1.Secret{ + Data: map[string][]byte{ + accountKeyField: []byte(testAccountKey), + }, + } +) + +func TestMain(m *testing.M) { + var err error + cred, err = blob.NewSharedKeyCredential(testAccountName, testAccountKey) + if err != nil { + log.Fatalf("unable to create shared key creds: %s", err.Error()) + } + code := m.Run() + os.Exit(code) +} + +func TestBlobClient_BucketExists(t *testing.T) { + g := NewWithT(t) + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Test if the container exists. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + ok, err := client.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) +} + +func TestBlobClient_BucketNotExists(t *testing.T) { + g := NewWithT(t) + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Test if the container exists. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + ok, err := client.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeFalse()) +} + +func TestBlobClient_FGetObject(t *testing.T) { + g := NewWithT(t) + + tempDir := t.TempDir() + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blob. + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) + + localPath := filepath.Join(tempDir, testFile) + + // Test if blob exists. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + _, err = client.FGetObject(ctx, testContainer, testFile, localPath) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(localPath).To(BeARegularFile()) + f, _ := os.ReadFile(localPath) + g.Expect(f).To(Equal([]byte(testFileData))) +} + +func TestBlobClientSASKey_FGetObject(t *testing.T) { + g := NewWithT(t) + + tempDir := t.TempDir() + + // create a client with the shared key + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blob. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)).To(Succeed()) + localPath := filepath.Join(tempDir, testFile) + + // use the shared key client to create a SAS key for the account + cred, err := service.NewSharedKeyCredential(testAccountName, testAccountKey) + g.Expect(err).ToNot(HaveOccurred()) + url := fmt.Sprintf("https://%s.blob.core.windows.net", testAccountName) + serviceClient, err := service.NewClientWithSharedKeyCredential(url, cred, nil) + g.Expect(err).ToNot(HaveOccurred()) + sasKey, err := serviceClient.GetSASURL(sas.AccountResourceTypes{Object: true, Container: true}, + sas.AccountPermissions{List: true, Read: true}, + time.Now().Add(48*time.Hour), + &service.GetSASURLOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(sasKey).ToNot(BeEmpty()) + // the sdk returns the full SAS url e.g test.blob.core.windows.net/? + sasKey = strings.TrimPrefix(sasKey, testBucket.Spec.Endpoint+"/") + testSASKeySecret := corev1.Secret{ + Data: map[string][]byte{ + sasKeyField: []byte(sasKey), + }, + } + + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + + // Test if bucket and blob exists using sasKey. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + + ok, err := sasKeyClient.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + _, err = client.FGetObject(ctx, testContainer, testFile, localPath) + g.Expect(err).ToNot(HaveOccurred()) + _, err = sasKeyClient.FGetObject(ctx, testContainer, testFile, localPath) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(localPath).To(BeARegularFile()) + f, _ := os.ReadFile(localPath) + g.Expect(f).To(Equal([]byte(testFileData))) +} + +func TestBlobClientContainerSASKey_BucketExists(t *testing.T) { + g := NewWithT(t) + + // create a client with the shared key + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blob. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) + + // use the container client to create a container-level SAS key for the account + cred, err := container.NewSharedKeyCredential(testAccountName, testAccountKey) + g.Expect(err).ToNot(HaveOccurred()) + url := fmt.Sprintf("https://%s.blob.core.windows.net/%s", testAccountName, testContainer) + containerClient, err := container.NewClientWithSharedKeyCredential(url, cred, nil) + g.Expect(err).ToNot(HaveOccurred()) + // sasKey + sasKey, err := containerClient.GetSASURL(sas.ContainerPermissions{Read: true, List: true}, + time.Now().Add(48*time.Hour), &container.GetSASURLOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(sasKey).ToNot(BeEmpty()) + + // the sdk returns the full SAS url e.g test.blob.core.windows.net//? + sasKey = strings.TrimPrefix(sasKey, testBucket.Spec.Endpoint+"/"+testContainer) + testSASKeySecret := corev1.Secret{ + Data: map[string][]byte{ + sasKeyField: []byte(sasKey), + }, + } + + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + + // Test if bucket and blob exists using sasKey. + ok, err := sasKeyClient.BucketExists(ctx, testContainer) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + + // BucketExists returns an error if the bucket doesn't exist with container level SAS + // since the error code is AuthenticationFailed. 
+ ok, err = sasKeyClient.BucketExists(ctx, "non-existent") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("the specified bucket name may be incorrect")) + g.Expect(ok).To(BeFalse()) +} + +func TestBlobClient_FGetObject_NotFoundErr(t *testing.T) { + g := NewWithT(t) + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Test blob does not exist. + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + _, err = client.FGetObject(ctx, testContainer, "doesnotexist.txt", "doesnotexist.txt") + + g.Expect(err).To(HaveOccurred()) + g.Expect(client.ObjectIsNotFound(err)).To(BeTrue()) +} + +func TestBlobClient_VisitObjects(t *testing.T) { + g := NewWithT(t) + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blobs. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) + g.Expect(createBlob(ctx, cred, testContainer, testFile2, testFile2Data)) + + visits := make(map[string]string) + + // Visit objects. + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + got := client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { + visits[path] = etag + return nil + }) + + g.Expect(got).To(Succeed()) + g.Expect(visits[testFile]).ToNot(BeEmpty()) + g.Expect(visits[testFile2]).ToNot(BeEmpty()) + g.Expect(visits[testFile]).ToNot(Equal(visits[testFile2])) +} + +func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { + g := NewWithT(t) + + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + // Generate test container name. + testContainer := generateString(testContainerGenerateName) + + // Create test container. + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createContainer(ctx, client, testContainer)).To(Succeed()) + t.Cleanup(func() { + g.Expect(deleteContainer(context.Background(), client, testContainer)).To(Succeed()) + }) + + // Create test blob. + ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + g.Expect(createBlob(ctx, cred, testContainer, testFile, testFileData)) + + // Visit object. 
+ ctx, timeout = context.WithTimeout(context.Background(), testTimeout) + defer timeout() + mockErr := fmt.Errorf("mock") + err = client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { + return mockErr + }) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("mock")) +} + +func createContainer(ctx context.Context, client *BlobClient, name string) error { + if _, err := client.CreateContainer(ctx, name, nil); err != nil { + var stgErr *azcore.ResponseError + if errors.As(err, &stgErr) { + if stgErr.ErrorCode == string(bloberror.ContainerAlreadyExists) { + return nil + } + err = stgErr + } + return err + } + return nil +} + +func createBlob(ctx context.Context, cred *blob.SharedKeyCredential, containerName, name, data string) error { + blobURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", testAccountName, containerName, name) + blobC, err := appendblob.NewClientWithSharedKeyCredential(blobURL, cred, nil) + if err != nil { + return err + } + ctx, timeout := context.WithTimeout(context.Background(), testTimeout) + defer timeout() + if _, err := blobC.Create(ctx, nil); err != nil { + return err + } + + hash := md5.Sum([]byte(data)) + + if _, err := blobC.AppendBlock(ctx, streaming.NopCloser(strings.NewReader(data)), &appendblob.AppendBlockOptions{ + TransactionalValidation: blob.TransferValidationTypeMD5(hash[:16]), + }); err != nil { + return err + } + return nil +} + +func deleteContainer(ctx context.Context, client *BlobClient, name string) error { + if _, err := client.DeleteContainer(ctx, name, nil); err != nil { + if bloberror.HasCode(err, bloberror.ContainerNotFound, bloberror.ContainerBeingDeleted) { + return nil + } + return err + } + return nil +} + +func generateString(prefix string) string { + randBytes := make([]byte, 16) + rand.Read(randBytes) + return prefix + hex.EncodeToString(randBytes) +} diff --git a/internal/bucket/azure/blob_test.go b/internal/bucket/azure/blob_test.go new file mode 100644 
index 000000000..83f17e900 --- /dev/null +++ b/internal/bucket/azure/blob_test.go @@ -0,0 +1,556 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/http" + "net/url" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +func TestNewClientAndBucketExistsWithProxy(t *testing.T) { + g := NewWithT(t) + + proxyAddr, _ := testproxy.New(t) + + // start mock bucket server + bucketListener, bucketAddr, _ := testlistener.New(t) + bucketEndpoint := fmt.Sprintf("http://%s", bucketAddr) + bucketHandler := http.NewServeMux() + bucketHandler.HandleFunc("GET /podinfo", func(w http.ResponseWriter, r *http.Request) { + // verify query params comp=list&maxresults=1&restype=container + q := r.URL.Query() + g.Expect(q.Get("comp")).To(Equal("list")) + g.Expect(q.Get("maxresults")).To(Equal("1")) + g.Expect(q.Get("restype")).To(Equal("container")) + // the azure library does not expose the struct for this response + // and copying its definition yields a strange "unsupported type" + // error when marshaling to xml, so we just hardcode a valid response + // here + resp := fmt.Sprintf(` + +1 + + +`, bucketEndpoint) + _, err := w.Write([]byte(resp)) + g.Expect(err).ToNot(HaveOccurred()) + }) + bucketServer := &http.Server{ + Addr: bucketAddr, + Handler: bucketHandler, + } + go bucketServer.Serve(bucketListener) + defer bucketServer.Shutdown(context.Background()) + + tests := []struct { + name string + endpoint string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", 1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + bucket := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + Endpoint: 
tt.endpoint, + }, + } + + client, err := NewClient(t.Context(), + bucket, + WithProxyURL(tt.proxyURL), + withoutCredentials(), + withoutRetries()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + ok, err := client.BucketExists(context.Background(), "podinfo") + if tt.err != "" { + g.Expect(err).To(MatchError(ContainSubstring(tt.err))) + g.Expect(ok).To(BeFalse()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + } + }) + } +} + +func TestValidateSecret(t *testing.T) { + tests := []struct { + name string + secret *corev1.Secret + wantErr bool + }{ + { + name: "valid UserManagedIdentity Secret", + secret: &corev1.Secret{ + Data: map[string][]byte{ + clientIDField: []byte("some-client-id-"), + }, + }, + }, + { + name: "valid ServicePrincipal Certificate Secret", + secret: &corev1.Secret{ + Data: map[string][]byte{ + tenantIDField: []byte("some-tenant-id-"), + clientIDField: []byte("some-client-id-"), + clientCertificateField: []byte("some-certificate"), + }, + }, + }, + { + name: "valid ServicePrincipal Secret", + secret: &corev1.Secret{ + Data: map[string][]byte{ + tenantIDField: []byte("some-tenant-id-"), + clientIDField: []byte("some-client-id-"), + clientSecretField: []byte("some-client-secret-"), + }, + }, + }, + { + name: "valid SAS Key Secret", + secret: &corev1.Secret{ + Data: map[string][]byte{ + sasKeyField: []byte("?spr= 0 { + transport, err := minio.DefaultTransport(minioOpts.Secure) + if err != nil { + return nil, fmt.Errorf("failed to create default minio transport: %w", err) + } + for _, opt := range transportOpts { + opt(transport) + } + minioOpts.Transport = transport + } + + client, err := minio.New(bucket.Spec.Endpoint, &minioOpts) + if err != nil { + return nil, err + } + return &MinioClient{Client: client}, nil +} + +// newCredsFromSecret creates a new Minio credentials object from the provided +// secret. 
+func newCredsFromSecret(secret *corev1.Secret) *credentials.Credentials { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey != "" && secretKey != "" { + return credentials.NewStaticV4(accessKey, secretKey, "") + } + return nil +} + +// newAWSCreds creates a new Minio credentials object for `aws` bucket provider. +// +// This function is only called when Secret authentication is not available. +// +// Uses AWS SDK's config.LoadDefaultConfig() which supports: +// - Workload Identity (IRSA/EKS Pod Identity) +// - EC2 instance profiles +// - Environment variables +// - Shared credentials files +// - All other AWS SDK authentication methods +func newAWSCreds(ctx context.Context, o *options) (*credentials.Credentials, error) { + var opts auth.Options + opts.Apply(o.authOpts...) + + awsCredsProvider := awsauth.NewCredentialsProvider(ctx, o.authOpts...) + awsCreds, err := awsCredsProvider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("AWS authentication failed: %w", err) + } + + return credentials.NewStaticV4( + awsCreds.AccessKeyID, + awsCreds.SecretAccessKey, + awsCreds.SessionToken, + ), nil +} + +// newGenericCreds creates a new Minio credentials object for the `generic` bucket provider. 
+func newGenericCreds(bucket *sourcev1.Bucket, o *options) *credentials.Credentials { + + sts := bucket.Spec.STS + if sts == nil { + return nil + } + + switch sts.Provider { + case sourcev1.STSProviderLDAP: + client := &http.Client{Transport: http.DefaultTransport} + if o.proxyURL != nil || o.stsTLSConfig != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + if o.proxyURL != nil { + transport.Proxy = http.ProxyURL(o.proxyURL) + } + if o.stsTLSConfig != nil { + transport.TLSClientConfig = o.stsTLSConfig.Clone() + } + client = &http.Client{Transport: transport} + } + var username, password string + if o.stsSecret != nil { + username = string(o.stsSecret.Data["username"]) + password = string(o.stsSecret.Data["password"]) + } + return credentials.New(&credentials.LDAPIdentity{ + Client: client, + STSEndpoint: sts.Endpoint, + LDAPUsername: username, + LDAPPassword: password, + }) + } + + return nil +} + +// ValidateSecret validates the credential secret. The provided Secret may +// be nil. +func ValidateSecret(secret *corev1.Secret) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + if _, ok := secret.Data["accesskey"]; !ok { + return err + } + if _, ok := secret.Data["secretkey"]; !ok { + return err + } + return nil +} + +// ValidateSTSProvider validates the STS provider. 
+func ValidateSTSProvider(bucketProvider string, sts *sourcev1.BucketSTSSpec) error { + errProviderIncompatbility := fmt.Errorf("STS provider '%s' is not supported for '%s' bucket provider", + sts.Provider, bucketProvider) + errSecretNotRequired := fmt.Errorf("spec.sts.secretRef is not required for the '%s' STS provider", + sts.Provider) + errCertSecretNotRequired := fmt.Errorf("spec.sts.certSecretRef is not required for the '%s' STS provider", + sts.Provider) + + switch bucketProvider { + case sourcev1.BucketProviderAmazon: + switch sts.Provider { + case sourcev1.STSProviderAmazon: + if sts.SecretRef != nil { + return errSecretNotRequired + } + if sts.CertSecretRef != nil { + return errCertSecretNotRequired + } + return nil + default: + return errProviderIncompatbility + } + case sourcev1.BucketProviderGeneric: + switch sts.Provider { + case sourcev1.STSProviderLDAP: + return nil + default: + return errProviderIncompatbility + } + } + + return fmt.Errorf("STS configuration is not supported for '%s' bucket provider", bucketProvider) +} + +// ValidateSTSSecret validates the STS secret. The provided Secret may be nil. +func ValidateSTSSecret(stsProvider string, secret *corev1.Secret) error { + switch stsProvider { + case sourcev1.STSProviderLDAP: + return validateSTSSecretForProvider(stsProvider, secret, "username", "password") + default: + return nil + } +} + +// validateSTSSecretForProvider validates the STS secret for each provider. +// The provided Secret may be nil. 
+func validateSTSSecretForProvider(stsProvider string, secret *corev1.Secret, keys ...string) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data for '%s' STS provider: required fields %s", + secret.Name, stsProvider, strings.Join(keys, ", ")) + if len(secret.Data) == 0 { + return err + } + for _, key := range keys { + value, ok := secret.Data[key] + if !ok || len(value) == 0 { + return err + } + } + return nil +} + +// FGetObject gets the object from the provided object storage bucket, and +// writes it to targetPath. +// It returns the etag of the successfully fetched file, or any error. +func (c *MinioClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { + stat, err := c.Client.StatObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + return "", err + } + opts := minio.GetObjectOptions{} + if err = opts.SetMatchETag(stat.ETag); err != nil { + return "", err + } + if err = c.Client.FGetObject(ctx, bucketName, objectName, localPath, opts); err != nil { + return "", err + } + return stat.ETag, nil +} + +// VisitObjects iterates over the items in the provided object storage +// bucket, calling visit for every item. +// If the underlying client or the visit callback returns an error, +// it returns early. +func (c *MinioClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error { + for object := range c.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{ + Recursive: true, + Prefix: prefix, + UseV1: s3utils.IsGoogleEndpoint(*c.Client.EndpointURL()), + }) { + if object.Err != nil { + err := fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, object.Err) + return err + } + + if err := visit(object.Key, object.ETag); err != nil { + return err + } + } + return nil +} + +// ObjectIsNotFound checks if the error provided is a minio.ErrResponse +// with "NoSuchKey" code. 
+func (c *MinioClient) ObjectIsNotFound(err error) bool { + if resp := new(minio.ErrorResponse); errors.As(err, resp) { + return resp.Code == "NoSuchKey" + } + return false +} + +// Close closes the Minio Client and logs any useful errors. +func (c *MinioClient) Close(_ context.Context) { + // Minio client does not provide a close method +} diff --git a/internal/bucket/minio/minio_test.go b/internal/bucket/minio/minio_test.go new file mode 100644 index 000000000..d6ba7baa4 --- /dev/null +++ b/internal/bucket/minio/minio_test.go @@ -0,0 +1,825 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package minio + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/xml" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/uuid" + miniov7 "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + . 
"github.com/onsi/gomega" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +const ( + objectName string = "test.yaml" + objectEtag string = "b07bba5a280b58791bc78fb9fc414b09" +) + +var ( + // testMinioVersion is the version (image tag) of the Minio server image + // used to test against. + testMinioVersion = "RELEASE.2024-05-07T06-41-25Z" + // testMinioRootUser is the root user of the Minio server. + testMinioRootUser = "fluxcd" + // testMinioRootPassword is the root password of the Minio server. + testMinioRootPassword = "passw0rd!" + // testVaultAddress is the address of the Minio server, it is set + // by TestMain after booting it. + testMinioAddress string + // testMinioClient is the Minio client used to test against, it is set + // by TestMain after booting the Minio server. + testMinioClient *MinioClient + // testTLSConfig is the TLS configuration used to connect to the Minio server. + testTLSConfig *tls.Config + // testServerCert is the path to the server certificate used to start the Minio + // and STS servers. + testServerCert string + // testServerKey is the path to the server key used to start the Minio and STS servers. + testServerKey string + // ctx is the common context used in tests. 
+ ctx context.Context +) + +var ( + bucketName = "test-bucket-minio" + uuid.New().String() + prefix = "" + secret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + emptySecret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{}, + Type: "Opaque", + } + bucket = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "generic", + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + bucketAwsProvider = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "aws", + }, + } +) + +func TestMain(m *testing.M) { + // Initialize common test context + ctx = context.Background() + + // Uses a sensible default on Windows (TCP/HTTP) and Linux/MacOS (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("could not connect to docker: %s", err) + } + + // Load a private key and certificate from a self-signed CA for the Minio server and + // a client TLS configuration to connect to the Minio server. 
+ testServerCert, testServerKey, testTLSConfig, err = loadServerCertAndClientTLSConfig() + if err != nil { + log.Fatalf("could not load server cert and client TLS config: %s", err) + } + + // Pull the image, create a container based on it, and run it + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "minio/minio", + Tag: testMinioVersion, + ExposedPorts: []string{ + "9000/tcp", + "9001/tcp", + }, + Env: []string{ + "MINIO_ROOT_USER=" + testMinioRootUser, + "MINIO_ROOT_PASSWORD=" + testMinioRootPassword, + }, + Cmd: []string{"server", "/data", "--console-address", ":9001"}, + Mounts: []string{ + fmt.Sprintf("%s:/root/.minio/certs/public.crt", testServerCert), + fmt.Sprintf("%s:/root/.minio/certs/private.key", testServerKey), + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + }) + if err != nil { + log.Fatalf("could not start resource: %s", err) + } + + purgeResource := func() { + if err := pool.Purge(resource); err != nil { + log.Printf("could not purge resource: %s", err) + } + } + + // Set the address of the Minio server used for testing. + testMinioAddress = fmt.Sprintf("127.0.0.1:%v", resource.GetPort("9000/tcp")) + + // Construct a Minio client using the address of the Minio server. + testMinioClient, err = NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + if err != nil { + log.Fatalf("cannot create Minio client: %s", err) + } + + // Wait until Minio is ready to serve requests... 
+ if err := pool.Retry(func() error { + hCancel, err := testMinioClient.HealthCheck(1 * time.Second) + if err != nil { + log.Fatalf("cannot start Minio health check: %s", err) + } + defer hCancel() + + if !testMinioClient.IsOnline() { + return fmt.Errorf("client is offline: Minio is not ready") + } + return nil + }); err != nil { + purgeResource() + log.Fatalf("could not connect to docker: %s", err) + } + + createBucket(ctx) + addObjectToBucket(ctx) + run := m.Run() + removeObjectFromBucket(ctx) + deleteBucket(ctx) + purgeResource() + os.Exit(run) +} + +func TestNewClient(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientEmptySecret(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(emptySecret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientAWSProvider(t *testing.T) { + t.Run("with secret", func(t *testing.T) { + validSecret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket, WithSecret(&validSecret)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + }) + + t.Run("without secret", func(t *testing.T) { + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("AWS authentication failed")) + 
g.Expect(minioClient).To(BeNil()) + }) +} + +func TestBucketExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, bucketName) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) +} + +func TestBucketNotExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, "notexistsbucket") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) +} + +func TestFGetObject(t *testing.T) { + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err := testMinioClient.FGetObject(ctx, bucketName, objectName, path) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { + var credsRetrieved bool + + // start a mock LDAP STS server + ldapSTSListener, ldapSTSAddr, _ := testlistener.New(t) + ldapSTSEndpoint := fmt.Sprintf("https://%s", ldapSTSAddr) + ldapSTSHandler := http.NewServeMux() + var ldapUsername, ldapPassword string + ldapSTSHandler.HandleFunc("POST /", + func(w http.ResponseWriter, r *http.Request) { + g := NewWithT(t) + err := r.ParseForm() + g.Expect(err).NotTo(HaveOccurred()) + username := r.Form.Get("LDAPUsername") + password := r.Form.Get("LDAPPassword") + g.Expect(username).To(Equal(ldapUsername)) + g.Expect(password).To(Equal(ldapPassword)) + var result credentials.LDAPIdentityResult + result.Credentials.AccessKey = testMinioRootUser + result.Credentials.SecretKey = testMinioRootPassword + err = xml.NewEncoder(w).Encode(credentials.AssumeRoleWithLDAPResponse{Result: result}) + g.Expect(err).NotTo(HaveOccurred()) + credsRetrieved = true + }) + ldapSTSServer := &http.Server{ + Addr: ldapSTSAddr, + Handler: ldapSTSHandler, + } + go ldapSTSServer.ServeTLS(ldapSTSListener, testServerCert, testServerKey) + defer ldapSTSServer.Shutdown(ctx) + + // start proxy + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + provider string + stsSpec 
*sourcev1.BucketSTSSpec + opts []Option + ldapUsername string + ldapPassword string + err string + }{ + { + name: "with correct ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{WithSTSTLSConfig(testTLSConfig)}, + }, + { + name: "with incorrect ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: fmt.Sprintf("http://localhost:%d", 1), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and secret", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithSTSTLSConfig(testTLSConfig), + WithSTSSecret(&corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("password"), + }, + }), + }, + ldapUsername: "user", + ldapPassword: "password", + }, + { + name: "with correct ldap endpoint and proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: proxyAddr}), + WithSTSTLSConfig(testTLSConfig), + }, + }, + { + name: "with correct ldap endpoint and incorrect proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and without client tls config", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + err: "tls: failed to verify certificate", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + credsRetrieved = false + ldapUsername = tt.ldapUsername + ldapPassword = tt.ldapPassword + + bucket := bucketStub(bucket, testMinioAddress) 
+ bucket.Spec.Provider = tt.provider + bucket.Spec.STS = tt.stsSpec + + opts := tt.opts + opts = append(opts, WithTLSConfig(testTLSConfig)) + + minioClient, err := NewClient(ctx, bucket, opts...) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + + path := filepath.Join(t.TempDir(), sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(credsRetrieved).To(BeTrue()) + } + }) + } +} + +func TestNewClientAndFGetObjectWithProxy(t *testing.T) { + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + errSubstring string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + errSubstring: "connection refused", + }, + } + + // run test + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig), + WithProxyURL(tt.proxyURL)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.errSubstring != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.errSubstring)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestFGetObjectNotExists(t *testing.T) { + tempDir := t.TempDir() + badKey := "invalid.txt" + path := filepath.Join(tempDir, badKey) + _, err := testMinioClient.FGetObject(ctx, bucketName, badKey, path) + g := 
NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("The specified key does not exist.")) + g.Expect(testMinioClient.ObjectIsNotFound(err)).To(BeTrue()) +} + +func TestVisitObjects(t *testing.T) { + keys := []string{} + etags := []string{} + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + keys = append(keys, key) + etags = append(etags, etag) + return nil + }) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) +} + +func TestVisitObjectsErr(t *testing.T) { + badBucketName := "bad-bucket" + err := testMinioClient.VisitObjects(ctx, badBucketName, prefix, func(string, string) error { + return nil + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName))) +} + +func TestVisitObjectsCallbackErr(t *testing.T) { + mockErr := fmt.Errorf("mock") + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + return mockErr + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) +} + +func TestValidateSecret(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + secret *corev1.Secret + error bool + }{ + { + name: "valid secret", + secret: secret.DeepCopy(), + }, + { + name: "nil secret", + secret: nil, + }, + { + name: "invalid secret", + secret: emptySecret.DeepCopy(), + error: true, + }, + } + for _, testCase := range testCases { + tt := testCase + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + err := ValidateSecret(tt.secret) + if tt.error { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name))) 
+ } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSProvider(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bucketProvider string + stsProvider string + withSecret bool + withCertSecret bool + err string + }{ + { + name: "aws", + bucketProvider: "aws", + stsProvider: "aws", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsProvider: "aws", + withSecret: true, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsProvider: "aws", + withCertSecret: true, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap", + bucketProvider: "generic", + stsProvider: "ldap", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsProvider: "ldap", + withSecret: true, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsProvider: "ldap", + withCertSecret: true, + }, + { + name: "ldap sts provider unsupported for aws bucket provider", + bucketProvider: "aws", + stsProvider: "ldap", + err: "STS provider 'ldap' is not supported for 'aws' bucket provider", + }, + { + name: "aws sts provider unsupported for generic bucket provider", + bucketProvider: "generic", + stsProvider: "aws", + err: "STS provider 'aws' is not supported for 'generic' bucket provider", + }, + { + name: "unsupported bucket provider", + bucketProvider: "gcp", + stsProvider: "ldap", + err: "STS configuration is not supported for 'gcp' bucket provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + sts := &sourcev1.BucketSTSSpec{ + Provider: tt.stsProvider, + } + if tt.withSecret { + sts.SecretRef = &meta.LocalObjectReference{} + } + if tt.withCertSecret { + sts.CertSecretRef = &meta.LocalObjectReference{} + } + g := NewWithT(t) + err := ValidateSTSProvider(tt.bucketProvider, sts) + if 
tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSSecret(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + provider string + secret *corev1.Secret + err string + }{ + { + name: "ldap provider does not require a secret", + provider: "ldap", + }, + { + name: "valid ldap secret", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + }, + { + name: "empty ldap secret", + provider: "ldap", + secret: &corev1.Secret{ObjectMeta: v1.ObjectMeta{Name: "ldap-secret"}}, + err: "invalid 'ldap-secret' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte(""), + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte(""), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + 
err := ValidateSTSSecret(tt.provider, tt.secret) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func bucketStub(bucket sourcev1.Bucket, endpoint string) *sourcev1.Bucket { + b := bucket.DeepCopy() + b.Spec.Endpoint = endpoint + b.Spec.Insecure = false + return b +} + +func createBucket(ctx context.Context) { + if err := testMinioClient.Client.MakeBucket(ctx, bucketName, miniov7.MakeBucketOptions{}); err != nil { + exists, errBucketExists := testMinioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + deleteBucket(ctx) + } else { + log.Fatalf("could not create bucket: %s", err) + } + } +} + +func deleteBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveBucket(ctx, bucketName); err != nil { + log.Println(err) + } +} + +func addObjectToBucket(ctx context.Context) { + fileReader := strings.NewReader(getObjectFile()) + fileSize := fileReader.Size() + _, err := testMinioClient.Client.PutObject(ctx, bucketName, objectName, fileReader, fileSize, miniov7.PutObjectOptions{ + ContentType: "text/x-yaml", + }) + if err != nil { + log.Println(err) + } +} + +func removeObjectFromBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveObject(ctx, bucketName, objectName, miniov7.RemoveObjectOptions{ + GovernanceBypass: true, + }); err != nil { + log.Println(err) + } +} + +func getObjectFile() string { + return ` + apiVersion: source.toolkit.fluxcd.io/v1 + kind: Bucket + metadata: + name: podinfo + namespace: default + spec: + interval: 5m + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s + ` +} + +func loadServerCertAndClientTLSConfig() (serverCert string, serverKey string, clientConf *tls.Config, err error) { + const certsDir = "../../controller/testdata/certs" + clientConf = &tls.Config{} + + serverCert, err = filepath.Abs(filepath.Join(certsDir, 
"server.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server cert path: %w", err) + } + serverKey, err = filepath.Abs(filepath.Join(certsDir, "server-key.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server key path: %w", err) + } + + b, err := os.ReadFile(filepath.Join(certsDir, "ca.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load CA: %w", err) + } + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(b) { + return "", "", nil, errors.New("failed to append CA to pool") + } + clientConf.RootCAs = caPool + + clientCert := filepath.Join(certsDir, "client.pem") + clientKey := filepath.Join(certsDir, "client-key.pem") + client, err := tls.LoadX509KeyPair(clientCert, clientKey) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load client cert and key: %w", err) + } + clientConf.Certificates = []tls.Certificate{client} + + return +} diff --git a/internal/cache/LICENSE b/internal/cache/LICENSE new file mode 100644 index 000000000..f49969d7f --- /dev/null +++ b/internal/cache/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012-2019 Patrick Mylund Nielsen and the go-cache contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 000000000..6f8ee8608 --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,245 @@ +// Copyright (c) 2012-2019 Patrick Mylund Nielsen and the go-cache contributors +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Copyright 2022 The FluxCD contributors. All rights reserved. +// This package provides an in-memory cache +// derived from the https://github.com/patrickmn/go-cache +// package +// It has been modified in order to keep a small set of functions +// and to add a maxItems parameter in order to limit the number of, +// and thus the size of, items in the cache. + +package cache + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// Cache is a thread-safe in-memory key/value store. +type Cache struct { + *cache +} + +// Item is an item stored in the cache. +type Item struct { + // Object is the item's value. + Object interface{} + // Expiration is the item's expiration time. + Expiration int64 +} + +type cache struct { + // Items holds the elements in the cache. + Items map[string]Item + // MaxItems is the maximum number of items the cache can hold. + MaxItems int + mu sync.RWMutex + janitor *janitor +} + +// ItemCount returns the number of items in the cache. +// This may include items that have expired, but have not yet been cleaned up. 
+func (c *cache) ItemCount() int { + c.mu.RLock() + n := len(c.Items) + c.mu.RUnlock() + return n +} + +func (c *cache) set(key string, value interface{}, expiration time.Duration) { + var e int64 + if expiration > 0 { + e = time.Now().Add(expiration).UnixNano() + } + + c.Items[key] = Item{ + Object: value, + Expiration: e, + } +} + +// Set adds an item to the cache, replacing any existing item. +// If expiration is zero, the item never expires. +// If the cache is full, Set will return an error. +func (c *cache) Set(key string, value interface{}, expiration time.Duration) error { + c.mu.Lock() + _, found := c.Items[key] + if found { + c.set(key, value, expiration) + c.mu.Unlock() + return nil + } + + if c.MaxItems > 0 && len(c.Items) < c.MaxItems { + c.set(key, value, expiration) + c.mu.Unlock() + return nil + } + + c.mu.Unlock() + return fmt.Errorf("Cache is full") +} + +// Add an item to the cache, existing items will not be overwritten. +// To overwrite existing items, use Set. +// If the cache is full, Add will return an error. +func (c *cache) Add(key string, value interface{}, expiration time.Duration) error { + c.mu.Lock() + _, found := c.Items[key] + if found { + c.mu.Unlock() + return fmt.Errorf("Item %s already exists", key) + } + + if c.MaxItems > 0 && len(c.Items) < c.MaxItems { + c.set(key, value, expiration) + c.mu.Unlock() + return nil + } + + c.mu.Unlock() + return fmt.Errorf("Cache is full") +} + +// Get an item from the cache. Returns the item or nil, and a bool indicating +// whether the key was found. +func (c *cache) Get(key string) (interface{}, bool) { + c.mu.RLock() + item, found := c.Items[key] + if !found { + c.mu.RUnlock() + return nil, false + } + if item.Expiration > 0 { + if item.Expiration < time.Now().UnixNano() { + c.mu.RUnlock() + return nil, false + } + } + c.mu.RUnlock() + return item.Object, true +} + +// Delete an item from the cache. Does nothing if the key is not in the cache. 
+func (c *cache) Delete(key string) { + c.mu.Lock() + delete(c.Items, key) + c.mu.Unlock() +} + +// Clear all items from the cache. +// This reallocates the underlying array holding the items, +// so that the memory used by the items is reclaimed. +func (c *cache) Clear() { + c.mu.Lock() + c.Items = make(map[string]Item) + c.mu.Unlock() +} + +// HasExpired returns true if the item has expired. +func (c *cache) HasExpired(key string) bool { + c.mu.RLock() + item, ok := c.Items[key] + if !ok { + c.mu.RUnlock() + return true + } + if item.Expiration > 0 { + if item.Expiration < time.Now().UnixNano() { + c.mu.RUnlock() + return true + } + } + c.mu.RUnlock() + return false +} + +// SetExpiration sets the expiration for the given key. +// Does nothing if the key is not in the cache. +func (c *cache) SetExpiration(key string, expiration time.Duration) { + c.mu.Lock() + item, ok := c.Items[key] + if ok { + item.Expiration = time.Now().Add(expiration).UnixNano() + c.Items[key] = item + } + c.mu.Unlock() +} + +// GetExpiration returns the expiration for the given key. +// Returns zero if the key is not in the cache or the item +// has already expired. +func (c *cache) GetExpiration(key string) time.Duration { + c.mu.RLock() + item, ok := c.Items[key] + if !ok { + c.mu.RUnlock() + return 0 + } + if item.Expiration > 0 { + if item.Expiration < time.Now().UnixNano() { + c.mu.RUnlock() + return 0 + } + } + c.mu.RUnlock() + return time.Duration(item.Expiration - time.Now().UnixNano()) +} + +// DeleteExpired deletes all expired items from the cache. 
+func (c *cache) DeleteExpired() { + c.mu.Lock() + for k, v := range c.Items { + if v.Expiration > 0 && v.Expiration < time.Now().UnixNano() { + delete(c.Items, k) + } + } + c.mu.Unlock() +} + +type janitor struct { + interval time.Duration + stop chan bool +} + +func (j *janitor) run(c *cache) { + ticker := time.NewTicker(j.interval) + for { + select { + case <-ticker.C: + c.DeleteExpired() + case <-j.stop: + ticker.Stop() + return + } + } +} + +func stopJanitor(c *Cache) { + c.janitor.stop <- true +} + +// New creates a new cache with the given configuration. +func New(maxItems int, interval time.Duration) *Cache { + c := &cache{ + Items: make(map[string]Item), + MaxItems: maxItems, + janitor: &janitor{ + interval: interval, + stop: make(chan bool), + }, + } + + C := &Cache{c} + + if interval > 0 { + go c.janitor.run(c) + runtime.SetFinalizer(C, stopJanitor) + } + + return C +} diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go new file mode 100644 index 000000000..e6d3d6ac6 --- /dev/null +++ b/internal/cache/cache_test.go @@ -0,0 +1,139 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "testing" + "time" + + . 
"github.com/onsi/gomega" +) + +func TestCache(t *testing.T) { + g := NewWithT(t) + // create a cache that can hold 2 items and have no cleanup + cache := New(2, 0) + + // Get an Item from the cache + if _, found := cache.Get("key1"); found { + t.Error("Item should not be found") + } + + // Add an item to the cache + err := cache.Add("key1", "value1", 0) + g.Expect(err).ToNot(HaveOccurred()) + + // Get the item from the cache + item, found := cache.Get("key1") + g.Expect(found).To(BeTrue()) + g.Expect(item).To(Equal("value1")) + + // Add another item to the cache + err = cache.Add("key2", "value2", 0) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cache.ItemCount()).To(Equal(2)) + + // Get the item from the cache + item, found = cache.Get("key2") + g.Expect(found).To(BeTrue()) + g.Expect(item).To(Equal("value2")) + + //Add an item to the cache + err = cache.Add("key3", "value3", 0) + g.Expect(err).To(HaveOccurred()) + + // Replace an item in the cache + err = cache.Set("key2", "value3", 0) + g.Expect(err).ToNot(HaveOccurred()) + + // Get the item from the cache + item, found = cache.Get("key2") + g.Expect(found).To(BeTrue()) + g.Expect(item).To(Equal("value3")) + + // new cache with a cleanup interval of 1 second + cache = New(2, 1*time.Second) + + // Add an item to the cache + err = cache.Add("key1", "value1", 2*time.Second) + g.Expect(err).ToNot(HaveOccurred()) + + // Get the item from the cache + item, found = cache.Get("key1") + g.Expect(found).To(BeTrue()) + g.Expect(item).To(Equal("value1")) + + // wait for the item to expire + time.Sleep(3 * time.Second) + + // Get the item from the cache + item, found = cache.Get("key1") + g.Expect(found).To(BeFalse()) + g.Expect(item).To(BeNil()) +} + +func TestCacheExpiration(t *testing.T) { + g := NewWithT(t) + cache := New(10, 0) + + key := "testKey" + value := "testValue" + expiration := 1 * time.Second + + err := cache.Add(key, value, expiration) + g.Expect(err).ToNot(HaveOccurred()) + + newExpiration 
:= 2 * time.Second + cache.SetExpiration(key, newExpiration) + actualExpiration := cache.GetExpiration(key) + + g.Expect(actualExpiration).Should(BeNumerically("~", newExpiration, 100*time.Millisecond)) + + g.Expect(cache.HasExpired(key)).To(BeFalse()) + + time.Sleep(newExpiration + 100*time.Millisecond) + + g.Expect(cache.HasExpired(key)).To(BeTrue()) + + g.Expect(cache.GetExpiration(key)).To(BeZero()) + + nonExistentKey := "nonExistent" + cache.SetExpiration(nonExistentKey, 1*time.Second) + g.Expect(cache.GetExpiration(nonExistentKey)).To(BeZero()) + + g.Expect(cache.HasExpired(nonExistentKey)).To(BeTrue()) +} + +func TestCacheDeleteClear(t *testing.T) { + g := NewWithT(t) + cache := New(3, 0) + + err := cache.Add("key1", "value1", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key2", "value2", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key3", "value3", 0) + g.Expect(err).ToNot(HaveOccurred()) + + cache.Delete("key2") + _, found := cache.Get("key2") + g.Expect(found).To(BeFalse()) + g.Expect(cache.ItemCount()).To(Equal(2)) + + cache.Clear() + g.Expect(cache.ItemCount()).To(Equal(0)) +} diff --git a/internal/cache/metrics.go b/internal/cache/metrics.go new file mode 100644 index 000000000..09b43ec5b --- /dev/null +++ b/internal/cache/metrics.go @@ -0,0 +1,81 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +const ( + // CacheEventTypeMiss is the event type for cache misses. + CacheEventTypeMiss = "cache_miss" + // CacheEventTypeHit is the event type for cache hits. + CacheEventTypeHit = "cache_hit" +) + +// CacheRecorder is a recorder for cache events. +type CacheRecorder struct { + // cacheEventsCounter is a counter for cache events. + cacheEventsCounter *prometheus.CounterVec +} + +// NewCacheRecorder returns a new CacheRecorder. +// The configured labels are: event_type, name, namespace. +// The event_type is one of: +// - "miss" +// - "hit" +// - "update" +// +// The name is the name of the reconciled resource. +// The namespace is the namespace of the reconciled resource. +func NewCacheRecorder() *CacheRecorder { + return &CacheRecorder{ + cacheEventsCounter: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "gotk_cache_events_total", + Help: "Total number of cache retrieval events for a Gitops Toolkit resource reconciliation.", + }, + []string{"event_type", "name", "namespace"}, + ), + } +} + +// Collectors returns the metrics.Collector objects for the CacheRecorder. +func (r *CacheRecorder) Collectors() []prometheus.Collector { + return []prometheus.Collector{ + r.cacheEventsCounter, + } +} + +// IncCacheEventCount increment by 1 the cache event count for the given event type, name and namespace. +func (r *CacheRecorder) IncCacheEvents(event, name, namespace string) { + r.cacheEventsCounter.WithLabelValues(event, name, namespace).Inc() +} + +// DeleteCacheEvent deletes the cache event metric. +func (r *CacheRecorder) DeleteCacheEvent(event, name, namespace string) { + r.cacheEventsCounter.DeleteLabelValues(event, name, namespace) +} + +// MustMakeMetrics creates a new CacheRecorder, and registers the metrics collectors in the controller-runtime metrics registry. 
+func MustMakeMetrics() *CacheRecorder { + r := NewCacheRecorder() + metrics.Registry.MustRegister(r.Collectors()...) + + return r +} diff --git a/internal/controller/artifact.go b/internal/controller/artifact.go new file mode 100644 index 000000000..bebc8d5ae --- /dev/null +++ b/internal/controller/artifact.go @@ -0,0 +1,41 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "github.com/fluxcd/pkg/apis/meta" +) + +type artifactSet []*meta.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} diff --git a/internal/controller/artifact_matchers_test.go b/internal/controller/artifact_matchers_test.go new file mode 100644 index 000000000..af716e086 --- /dev/null +++ b/internal/controller/artifact_matchers_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + + "github.com/fluxcd/pkg/apis/meta" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. +func MatchArtifact(expected *meta.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *meta.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*meta.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} diff --git a/internal/controller/artifact_test.go b/internal/controller/artifact_test.go new file mode 100644 index 000000000..d40548c3c --- /dev/null +++ 
b/internal/controller/artifact_test.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "testing" +) + +func Test_artifactSet_Diff(t *testing.T) { + tests := []struct { + name string + current artifactSet + updated artifactSet + expected bool + }{ + { + name: "one artifact, no diff", + current: artifactSet{ + { + Revision: "foo", + }, + }, + updated: artifactSet{ + { + Revision: "foo", + }, + }, + expected: false, + }, + { + name: "one artifact, diff", + current: artifactSet{ + { + Revision: "foo", + }, + }, + updated: artifactSet{ + { + Revision: "bar", + }, + }, + expected: true, + }, + { + name: "multiple artifacts, no diff", + current: artifactSet{ + { + Revision: "foo", + }, + { + Revision: "bar", + }, + }, + updated: artifactSet{ + { + Revision: "foo", + }, + { + Revision: "bar", + }, + }, + expected: false, + }, + { + name: "multiple artifacts, diff", + current: artifactSet{ + { + Revision: "foo", + }, + { + Revision: "bar", + }, + }, + updated: artifactSet{ + { + Revision: "foo", + }, + { + Revision: "baz", + }, + }, + expected: true, + }, + { + name: "different artifact count", + current: artifactSet{ + { + Revision: "foo", + }, + { + Revision: "bar", + }, + }, + updated: artifactSet{ + { + Revision: "foo", + }, + }, + expected: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.current.Diff(tt.updated) + if result != tt.expected { + 
t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) + } + }) + } +} diff --git a/internal/controller/bucket_controller.go b/internal/controller/bucket_controller.go new file mode 100644 index 000000000..7fe881be6 --- /dev/null +++ b/internal/controller/bucket_controller.go @@ -0,0 +1,985 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" + 
"github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/bucket/azure" + "github.com/fluxcd/source-controller/internal/bucket/gcp" + "github.com/fluxcd/source-controller/internal/bucket/minio" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/index" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +// maxConcurrentBucketFetches is the upper bound on the goroutines used to +// fetch bucket objects. It's important to have a bound, to avoid +// using arbitrary amounts of memory; the actual number is chosen +// according to the queueing rule of thumb with some conservative +// parameters: +// s > Nr / T +// N (number of requestors, i.e., objects to fetch) = 10000 +// r (service time -- fetch duration) = 0.01s (~ a megabyte file over 1Gb/s) +// T (total time available) = 1s +// -> s > 100 +const maxConcurrentBucketFetches = 100 + +// bucketReadyCondition contains the information required to summarize a +// v1.Bucket Ready Condition. 
+var bucketReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// bucketFailConditions contains the conditions that represent a failure. +var bucketFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.StorageOperationFailedCondition, +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create + +// BucketReconciler reconciles a v1.Bucket object. 
type BucketReconciler struct {
	client.Client
	kuberecorder.EventRecorder
	helper.Metrics

	// Storage is the artifact storage backing this reconciler.
	Storage *storage.Storage
	// ControllerName is used as the field owner for status patches.
	ControllerName string
	// TokenCache is an optional cache for provider tokens; its use is not
	// visible in this chunk — see fluxcd/pkg/cache for semantics.
	TokenCache *cache.TokenCache

	// patchOptions is initialized once in SetupWithManagerAndOptions and
	// reused for every patch issued by this reconciler.
	patchOptions []patch.Option
}

// BucketReconcilerOptions holds the configurable options for setting up the
// reconciler with a manager (currently only the work-queue rate limiter).
type BucketReconcilerOptions struct {
	RateLimiter workqueue.TypedRateLimiter[reconcile.Request]
}

// BucketProvider is an interface for fetching objects from a storage provider
// bucket.
type BucketProvider interface {
	// BucketExists returns if an object storage bucket with the provided name
	// exists, or returns a (client) error.
	BucketExists(ctx context.Context, bucketName string) (bool, error)
	// FGetObject gets the object from the provided object storage bucket, and
	// writes it to targetPath.
	// It returns the etag of the successfully fetched file, or any error.
	FGetObject(ctx context.Context, bucketName, objectKey, targetPath string) (etag string, err error)
	// VisitObjects iterates over the items in the provided object storage
	// bucket, calling visit for every item.
	// If the underlying client or the visit callback returns an error,
	// it returns early.
	VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error
	// ObjectIsNotFound returns true if the given error indicates an object
	// could not be found.
	ObjectIsNotFound(error) bool
	// Close closes the provider's client, if supported.
	Close(context.Context)
}

// bucketCredentials contains all credentials and configuration needed for bucket providers.
type bucketCredentials struct {
	// secret presumably holds bucket access credentials — confirm against
	// the provider constructors elsewhere in this package.
	secret *corev1.Secret
	// proxyURL is an optional proxy for reaching the endpoint.
	proxyURL *url.URL
	// tlsConfig is the optional TLS configuration for the bucket endpoint.
	tlsConfig *tls.Config
	// stsSecret holds optional credentials for the STS endpoint.
	stsSecret *corev1.Secret
	// stsTLSConfig is the optional TLS configuration for the STS endpoint.
	stsTLSConfig *tls.Config
}

// bucketReconcileFunc is the function type for all the v1.Bucket
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error)

// SetupWithManager sets up the reconciler with the manager using default
// options.
func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
}

// SetupWithManagerAndOptions registers the reconciler for v1.Bucket objects
// with the given manager and options. The event filter restricts
// reconciliation triggers to generation changes and explicit reconcile
// requests.
func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error {
	r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName)

	return ctrl.NewControllerManagedBy(mgr).
		For(&sourcev1.Bucket{}).
		WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
		WithOptions(controller.Options{
			RateLimiter: opts.RateLimiter,
		}).
		Complete(r)
}

// Reconcile is the entry point for v1.Bucket reconciliation. It fetches the
// object, handles deletion/finalizer/suspension short-circuits, and otherwise
// runs the sub-reconcilers via r.reconcile. The named results (result,
// retErr) are populated by the deferred summarize-and-patch block below, so
// the naked returns in this function are intentional.
func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
	start := time.Now()
	log := ctrl.LoggerFrom(ctx)

	// Fetch the Bucket
	obj := &sourcev1.Bucket{}
	if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Initialize the patch helper with the current version of the object.
	serialPatcher := patch.NewSerialPatcher(obj, r.Client)

	// recResult stores the abstracted reconcile result.
	var recResult sreconcile.Result

	// Always attempt to patch the object and status after each reconciliation
	// NOTE: The final runtime result and error are set in this block.
	defer func() {
		summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher)
		summarizeOpts := []summarize.Option{
			summarize.WithConditions(bucketReadyCondition),
			summarize.WithReconcileResult(recResult),
			summarize.WithReconcileError(retErr),
			summarize.WithIgnoreNotFound(),
			summarize.WithProcessors(
				summarize.ErrorActionHandler,
				summarize.RecordReconcileReq,
			),
			// Requeue interval is jittered to avoid thundering-herd
			// reconciliations of many objects at once.
			summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
				RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
			}),
			summarize.WithPatchFieldOwner(r.ControllerName),
		}
		result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)

		// Always record duration metrics.
		r.Metrics.RecordDuration(ctx, obj, start)
	}()

	// Examine if the object is under deletion.
	if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
		recResult, retErr = r.reconcileDelete(ctx, obj)
		return
	}

	// Add finalizer first if not exist to avoid the race condition between init
	// and delete.
	// Note: Finalizers in general can only be added when the deletionTimestamp
	// is not set.
	if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
		controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
		recResult = sreconcile.ResultRequeue
		return
	}

	// Return if the object is suspended.
	if obj.Spec.Suspend {
		log.Info("reconciliation is suspended for this object")
		recResult, retErr = sreconcile.ResultEmpty, nil
		return
	}

	// Reconcile actual object
	reconcilers := []bucketReconcileFunc{
		r.reconcileStorage,
		r.reconcileSource,
		r.reconcileArtifact,
	}
	recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers)
	return
}

// reconcile iterates through the bucketReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
	// Keep a copy of the object as observed before reconciliation, for
	// old/new comparison when emitting notifications.
	oldObj := obj.DeepCopy()

	rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")

	// recAtVal holds the value of the reconcile-request annotation, if set.
	var recAtVal string
	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
		recAtVal = v
	}

	// Persist reconciling if generation differs or reconciliation is requested.
	switch {
	case obj.Generation != obj.Status.ObservedGeneration:
		rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
			"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	case recAtVal != obj.Status.GetLastHandledReconcileRequest():
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	}

	// Create temp working dir
	tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create temporary working directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	defer func() {
		// Best-effort cleanup of the working directory; a failure is only
		// logged so it never masks the reconcile result.
		if err = os.RemoveAll(tmpDir); err != nil {
			ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
		}
	}()
	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)

	// Run the sub-reconcilers and build the result of reconciliation.
	var (
		res    sreconcile.Result
		resErr error
		index  = index.NewDigester()
	)

	for _, rec := range reconcilers {
		recResult, err := rec(ctx, sp, obj, index, tmpDir)
		// Exit immediately on ResultRequeue.
		if recResult == sreconcile.ResultRequeue {
			return sreconcile.ResultRequeue, nil
		}
		// If an error is received, prioritize the returned results because an
		// error also means immediate requeue.
		if err != nil {
			resErr = err
			res = recResult
			break
		}
		// Prioritize requeue request in the result.
		res = sreconcile.LowestRequeuingResult(res, recResult)
	}

	r.notify(ctx, oldObj, obj, index, res, resErr)

	return res, resErr
}

// notify emits notification related to the reconciliation.
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) {
	// Notify successful reconciliation for new artifact and recovery from any
	// failure.
	if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
		annotations := map[string]string{
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey):   newObj.Status.Artifact.Digest,
		}

		message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName)

		// Notify on new artifact and failure recovery.
		if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
			r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
				"NewArtifact", message)
			ctrl.LoggerFrom(ctx).Info(message)
		} else {
			if sreconcile.FailureRecovery(oldObj, newObj, bucketFailConditions) {
				r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
					meta.SucceededReason, message)
				ctrl.LoggerFrom(ctx).Info(message)
			}
		}
	}
}

// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
// The garbage collection is executed based on the flag configured settings and
// may remove files that are beyond their TTL or the maximum number of files
// to survive a collection cycle.
// If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) {
	// Garbage collect previous advertised artifact(s) from storage
	_ = r.garbageCollect(ctx, obj)

	var artifactMissing bool
	if artifact := obj.GetArtifact(); artifact != nil {
		// Determine if the advertised artifact is still in storage
		if !r.Storage.ArtifactExist(*artifact) {
			artifactMissing = true
		}

		// If the artifact is in storage, verify if the advertised digest still
		// matches the actual artifact
		if !artifactMissing {
			if err := r.Storage.VerifyArtifact(*artifact); err != nil {
				r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())

				// Remove the corrupted artifact so a fresh one is produced;
				// a removal failure aborts the reconciliation.
				if err = r.Storage.Remove(*artifact); err != nil {
					return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
				}

				artifactMissing = true
			}
		}

		// If the artifact is missing, remove it from the object
		if artifactMissing {
			obj.Status.Artifact = nil
			obj.Status.URL = ""
		}
	}

	// Record that we do not have an artifact
	if obj.GetArtifact() == nil {
		msg := "building artifact"
		if artifactMissing {
			msg += ": disappeared from storage"
		}
		rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
		conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
		return sreconcile.ResultSuccess, nil
	}

	// Always update URLs to ensure hostname is up-to-date
	// TODO(hidde): we may want to send out an event only if we notice the URL has changed
	r.Storage.SetArtifactURL(obj.GetArtifact())
	obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)

	return sreconcile.ResultSuccess, nil
}

// reconcileSource fetches the
// upstream bucket contents with the client for the
// given object's Provider, and returns the result.
// When a SecretRef is defined, it attempts to fetch the Secret before calling
// the provider. If this fails, it records v1.FetchFailedCondition=True on
// the object and returns early.
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
	// Object-level workload identity is only valid for non-generic providers
	// combined with a service account, and is additionally feature-gated.
	usesObjectLevelWorkloadIdentity := obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.BucketProviderGeneric && obj.Spec.ServiceAccountName != ""
	if usesObjectLevelWorkloadIdentity {
		if !auth.IsObjectLevelWorkloadIdentityEnabled() {
			const gate = auth.FeatureGateObjectLevelWorkloadIdentity
			const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller"
			err := fmt.Errorf(msgFmt, gate)
			// Stalling: the object cannot make progress until the controller
			// configuration changes.
			e := serror.NewStalling(err, meta.FeatureGateDisabledReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	}

	creds, err := r.setupCredentials(ctx, obj)
	if err != nil {
		e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	provider, err := r.createBucketProvider(ctx, obj, creds)
	if err != nil {
		// Preserve the typed error (stalling vs. generic) so the summarizer
		// treats it correctly; anything else is wrapped as an auth failure.
		var stallingErr *serror.Stalling
		var genericErr *serror.Generic
		if errors.As(err, &stallingErr) {
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, stallingErr.Reason, "%s", stallingErr)
			return sreconcile.ResultEmpty, stallingErr
		} else if errors.As(err, &genericErr) {
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, genericErr.Reason, "%s", genericErr)
			return sreconcile.ResultEmpty, genericErr
		} else {
			e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	}
	changed, err := r.syncBucketArtifacts(ctx, provider, obj, index, dir)
	if err != nil {
		e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Update artifact status if changes were detected
	if changed {
		revision := index.Digest(intdigest.Canonical)
		message := fmt.Sprintf("new upstream revision '%s'", revision)
		if obj.GetArtifact() != nil {
			conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message)
		}
		rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			ctrl.LoggerFrom(ctx).Error(err, "failed to patch")
			return sreconcile.ResultEmpty, err
		}
	}

	conditions.Delete(obj, sourcev1.FetchFailedCondition)
	return sreconcile.ResultSuccess, nil
}

// reconcileArtifact archives a new Artifact to the Storage, if the current
// (Status) data on the object does not match the given.
//
// The inspection of the given data to the object is differed, ensuring any
// stale observations like v1.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
	// Calculate revision
	revision := index.Digest(intdigest.Canonical)

	// Create artifact
	artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision.String(), fmt.Sprintf("%s.tar.gz", revision.Encoded()))

	// Set the ArtifactInStorageCondition if there's no drift.
	// Deferred so that it runs after any archive performed below has updated
	// the object's Artifact.
	defer func() {
		if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
			curRev := digest.Digest(curArtifact.Revision)
			if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
				conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
					"stored artifact: revision '%s'", artifact.Revision)
			}
		}
	}()

	// The artifact is up-to-date
	if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
		curRev := digest.Digest(curArtifact.Revision)
		if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
			return sreconcile.ResultSuccess, nil
		}
	}

	// Ensure target path exists and is a directory
	if f, err := os.Stat(dir); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to stat source path: %w", err),
			sourcev1.StatOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	} else if !f.IsDir() {
		e := serror.NewGeneric(
			fmt.Errorf("source path '%s' is not a directory", dir),
			sourcev1.InvalidPathReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Ensure artifact directory exists and acquire lock
	if err := r.Storage.MkdirAll(artifact); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create artifact directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	unlock, err := r.Storage.Lock(artifact)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to acquire lock for artifact: %w", err),
			meta.FailedReason,
		)
	}
	defer unlock()

	// Archive directory to storage
	if err := r.Storage.Archive(&artifact, dir, nil); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("unable to archive artifact to storage: %s", err),
			sourcev1.ArchiveOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Record it on the object
	obj.Status.Artifact = artifact.DeepCopy()
	obj.Status.ObservedIgnore = obj.Spec.Ignore

	// Update symlink on a "best effort" basis
	url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
	if err != nil {
		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
			"failed to update status URL symlink: %s", err)
	}
	if url != "" {
		obj.Status.URL = url
	}
	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
	return sreconcile.ResultSuccess, nil
}

// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// Removing the finalizer from the object if successful.
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.BucketKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. +// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. +func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %s", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// 
eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + r.annotatedEventLogf(ctx, obj, nil, eventType, reason, messageFmt, args...) +} + +// annotatedEventLogf records annotated events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, + obj runtime.Object, annotations map[string]string, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.AnnotatedEventf(obj, annotations, eventType, reason, msg) +} + +// fetchEtagIndex fetches the current etagIndex for the in the obj specified +// bucket using the given provider, while filtering them using .sourceignore +// rules. After fetching an object, the etag value in the index is updated to +// the current value to ensure accuracy. 
+func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + + // Confirm bucket exists + exists, err := provider.BucketExists(ctxTimeout, obj.Spec.BucketName) + if err != nil { + return fmt.Errorf("failed to confirm existence of '%s' bucket: %w", obj.Spec.BucketName, err) + } + if !exists { + err = fmt.Errorf("bucket '%s' not found", obj.Spec.BucketName) + return err + } + + // Look for file with ignore rules first + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if !provider.ObjectIsNotFound(err) { + return fmt.Errorf("failed to get Etag for '%s' object: %w", sourceignore.IgnoreFile, serror.SanitizeError(err)) + } + } + ps, err := sourceignore.ReadIgnoreFile(path, nil) + if err != nil { + return err + } + // In-spec patterns take precedence + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) + } + matcher := sourceignore.NewMatcher(ps) + + // Build up index + err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, obj.Spec.Prefix, func(key, etag string) error { + if strings.HasSuffix(key, "/") || key == sourceignore.IgnoreFile { + return nil + } + + if matcher.Match(strings.Split(key, "/"), false) { + return nil + } + + index.Add(key, etag) + return nil + }) + if err != nil { + return fmt.Errorf("indexation of objects from bucket '%s' failed: %w", obj.Spec.BucketName, err) + } + return nil +} + +// fetchIndexFiles fetches the object files for the keys from the given etagIndex +// using the given provider, and stores them into tempDir. It downloads in +// parallel, but limited to the maxConcurrentBucketFetches. +// Given an index is provided, the bucket is assumed to exist. 
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error {
	ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
	defer cancel()

	// Download in parallel, but bound the concurrency. According to
	// AWS and GCP docs, rate limits are either soft or don't exist:
	// - https://cloud.google.com/storage/quotas
	// - https://docs.aws.amazon.com/general/latest/gr/s3.html
	// .. so, the limiting factor is this process keeping a small footprint.
	group, groupCtx := errgroup.WithContext(ctx)
	// The outer goroutine spawns one bounded goroutine per indexed object.
	// Because sem.Acquire blocks, all inner group.Go calls happen before the
	// outer function returns, so group.Wait accounts for every download.
	group.Go(func() error {
		sem := semaphore.NewWeighted(maxConcurrentBucketFetches)
		for key, etag := range index.Index() {
			// Capture per-iteration copies for the closure below.
			k := key
			t := etag
			if err := sem.Acquire(groupCtx, 1); err != nil {
				return err
			}
			group.Go(func() error {
				defer sem.Release(1)
				localPath := filepath.Join(tempDir, k)
				etag, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath)
				if err != nil {
					// An object that vanished between indexing and fetching is
					// dropped from the index instead of failing the fetch.
					if provider.ObjectIsNotFound(err) {
						ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("indexed object '%s' disappeared from '%s' bucket", k, obj.Spec.BucketName))
						index.Delete(k)
						return nil
					}
					return fmt.Errorf("failed to get '%s' object: %w", k, serror.SanitizeError(err))
				}
				// Refresh the recorded etag if it changed since indexing.
				if t != etag {
					index.Add(k, etag)
				}
				return nil
			})
		}
		return nil
	})
	if err := group.Wait(); err != nil {
		return fmt.Errorf("fetch from bucket '%s' failed: %w", obj.Spec.BucketName, err)
	}

	return nil
}

// setupCredentials retrieves and validates secrets for authentication, TLS configuration, and proxy settings.
// It returns all credentials needed for bucket providers.
func (r *BucketReconciler) setupCredentials(ctx context.Context, obj *sourcev1.Bucket) (*bucketCredentials, error) {
	// Optional authentication Secret (spec.secretRef).
	var secret *corev1.Secret
	if obj.Spec.SecretRef != nil {
		secretName := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.SecretRef.Name,
		}
		secret = &corev1.Secret{}
		if err := r.Get(ctx, secretName, secret); err != nil {
			return nil, fmt.Errorf("failed to get secret '%s': %w", secretName, err)
		}
	}

	// Optional STS Secret (spec.sts.secretRef).
	var stsSecret *corev1.Secret
	if obj.Spec.STS != nil && obj.Spec.STS.SecretRef != nil {
		secretName := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.STS.SecretRef.Name,
		}
		stsSecret = &corev1.Secret{}
		if err := r.Get(ctx, secretName, stsSecret); err != nil {
			return nil, fmt.Errorf("failed to get STS secret '%s': %w", secretName, err)
		}
	}

	var (
		err          error
		proxyURL     *url.URL
		tlsConfig    *tls.Config
		stsTLSConfig *tls.Config
	)

	if obj.Spec.ProxySecretRef != nil {
		secretRef := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.ProxySecretRef.Name,
		}
		proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef)
		if err != nil {
			return nil, fmt.Errorf("failed to get proxy URL: %w", err)
		}
	}

	if obj.Spec.CertSecretRef != nil {
		secretRef := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.CertSecretRef.Name,
		}
		tlsConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool())
		if err != nil {
			return nil, fmt.Errorf("failed to get TLS config: %w", err)
		}
	}

	if obj.Spec.STS != nil && obj.Spec.STS.CertSecretRef != nil {
		secretRef := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.STS.CertSecretRef.Name,
		}
		stsTLSConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool())
		if err != nil {
			return nil, fmt.Errorf("failed to get STS TLS config: %w", err)
		}
	}

	return &bucketCredentials{
		secret:       secret,
		proxyURL:     proxyURL,
		tlsConfig:    tlsConfig,
		stsSecret:    stsSecret,
		stsTLSConfig: stsTLSConfig,
	}, nil
}

// createBucketProvider creates a provider-specific bucket client using the given credentials and configuration.
// It handles different bucket providers (AWS, GCP, Azure, generic) and returns the appropriate client.
func (r *BucketReconciler) createBucketProvider(ctx context.Context, obj *sourcev1.Bucket, creds *bucketCredentials) (BucketProvider, error) {
	// Common auth options shared by all providers that use workload identity.
	authOpts := []auth.Option{
		auth.WithClient(r.Client),
		auth.WithServiceAccountNamespace(obj.GetNamespace()),
	}

	if obj.Spec.ServiceAccountName != "" {
		authOpts = append(authOpts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName))
	}

	if r.TokenCache != nil {
		involvedObject := cache.InvolvedObject{
			Kind:      sourcev1.BucketKind,
			Name:      obj.GetName(),
			Namespace: obj.GetNamespace(),
			Operation: cache.OperationReconcile,
		}
		authOpts = append(authOpts, auth.WithCache(*r.TokenCache, involvedObject))
	}

	if creds.proxyURL != nil {
		authOpts = append(authOpts, auth.WithProxyURL(*creds.proxyURL))
	}

	if obj.Spec.Region != "" {
		authOpts = append(authOpts, auth.WithSTSRegion(obj.Spec.Region))
	}

	if sts := obj.Spec.STS; sts != nil {
		authOpts = append(authOpts, auth.WithSTSEndpoint(sts.Endpoint))
	}

	switch obj.Spec.Provider {
	case sourcev1.BucketProviderGoogle:
		var opts []gcp.Option
		if creds.proxyURL != nil {
			opts = append(opts, gcp.WithProxyURL(creds.proxyURL))
		}

		// A configured Secret takes precedence over workload identity auth.
		if creds.secret != nil {
			if err := gcp.ValidateSecret(creds.secret); err != nil {
				return nil, err
			}
			opts = append(opts, gcp.WithSecret(creds.secret))
		} else {
			opts = append(opts, gcp.WithAuth(authOpts...))
		}

		return gcp.NewClient(ctx, obj, opts...)

	case sourcev1.BucketProviderAzure:
		// NOTE(review): unlike the Google case, ValidateSecret is called even
		// when creds.secret is nil — presumably it accepts nil; confirm.
		if err := azure.ValidateSecret(creds.secret); err != nil {
			return nil, err
		}
		var opts []azure.Option
		if creds.secret != nil {
			opts = append(opts, azure.WithSecret(creds.secret))
		}
		if creds.proxyURL != nil {
			opts = append(opts, azure.WithProxyURL(creds.proxyURL))
		}
		opts = append(opts, azure.WithAuth(authOpts...))
		return azure.NewClient(ctx, obj, opts...)

	default:
		// Amazon and generic providers are served by the minio client.
		if err := minio.ValidateSecret(creds.secret); err != nil {
			return nil, err
		}
		if sts := obj.Spec.STS; sts != nil {
			// STS misconfiguration cannot resolve itself: return stalling errors.
			if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil {
				return nil, serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason)
			}
			if _, err := url.Parse(sts.Endpoint); err != nil {
				return nil, serror.NewStalling(fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err), sourcev1.URLInvalidReason)
			}
			if err := minio.ValidateSTSSecret(sts.Provider, creds.stsSecret); err != nil {
				return nil, serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
			}
		}
		var opts []minio.Option
		if creds.secret != nil {
			opts = append(opts, minio.WithSecret(creds.secret))
		} else if obj.Spec.Provider == sourcev1.BucketProviderAmazon {
			opts = append(opts, minio.WithAuth(authOpts...))
		}
		if creds.tlsConfig != nil {
			opts = append(opts, minio.WithTLSConfig(creds.tlsConfig))
		}
		if creds.proxyURL != nil {
			opts = append(opts, minio.WithProxyURL(creds.proxyURL))
		}
		if creds.stsSecret != nil {
			opts = append(opts, minio.WithSTSSecret(creds.stsSecret))
		}
		if creds.stsTLSConfig != nil {
			opts = append(opts, minio.WithSTSTLSConfig(creds.stsTLSConfig))
		}
		return minio.NewClient(ctx, obj, opts...)
	}
}

// syncBucketArtifacts handles etag index retrieval and bucket object fetching.
// It fetches the etag index from the provider and downloads objects to the specified directory.
// Returns true if changes were detected and artifacts were updated.
func (r *BucketReconciler) syncBucketArtifacts(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, dir string) (bool, error) {
	if err := fetchEtagIndex(ctx, provider, obj, index, dir); err != nil {
		return false, err
	}
	// A change is detected when the current artifact revision is invalid or
	// no longer matches the digest of the freshly fetched index.
	var changed bool
	if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" {
		curRev := digest.Digest(artifact.Revision)
		changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm())
	}

	// Fetch the bucket objects if required to.
	if artifact := obj.GetArtifact(); artifact == nil || changed {
		if err := fetchIndexFiles(ctx, provider, obj, index, dir); err != nil {
			return false, err
		}
		return true, nil
	}

	return false, nil
}
diff --git a/internal/controller/bucket_controller_fetch_test.go b/internal/controller/bucket_controller_fetch_test.go
new file mode 100644
index 000000000..707d645f3
--- /dev/null
+++ b/internal/controller/bucket_controller_fetch_test.go
@@ -0,0 +1,292 @@
/*
Copyright 2022 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
	"github.com/fluxcd/source-controller/internal/index"
)

// mockBucketObject is a single in-memory object held by mockBucketClient.
type mockBucketObject struct {
	etag string
	data string
}

// mockBucketClient is an in-memory BucketProvider implementation for tests.
type mockBucketClient struct {
	bucketName string
	objects    map[string]mockBucketObject
}

// errMockNotFound is the sentinel returned for missing objects, matched by
// ObjectIsNotFound below.
var errMockNotFound = fmt.Errorf("not found")

func (m mockBucketClient) BucketExists(_ context.Context, name string) (bool, error) {
	return name == m.bucketName, nil
}

func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string) (string, error) {
	if bucket != m.bucketName {
		return "", fmt.Errorf("bucket does not exist")
	}
	// tiny bit of protocol, for convenience: if asked for an object "error", then return an error.
	if obj == "error" {
		return "", fmt.Errorf("I was asked to report an error")
	}
	object, ok := m.objects[obj]
	if !ok {
		return "", errMockNotFound
	}
	if err := os.WriteFile(path, []byte(object.data), os.FileMode(0660)); err != nil {
		return "", err
	}
	return object.etag, nil
}

func (m mockBucketClient) ObjectIsNotFound(e error) bool {
	return e == errMockNotFound
}

func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f func(key, etag string) error) error {
	for key, obj := range m.objects {
		if err := f(key, obj.etag); err != nil {
			return err
		}
	}
	return nil
}

func (m mockBucketClient) Close(_ context.Context) {}

// addObject inserts (or replaces) an object, lazily initializing the map.
func (m *mockBucketClient) addObject(key string, object mockBucketObject) {
	if m.objects == nil {
		m.objects = make(map[string]mockBucketObject)
	}
	m.objects[key] = object
}

// objectsToDigestIndex builds a Digester from the client's current objects.
func (m *mockBucketClient) objectsToDigestIndex() *index.Digester {
	i := index.NewDigester()
	for k, v := range m.objects {
		i.Add(k, v.etag)
	}
	return i
}

func Test_fetchEtagIndex(t *testing.T) {
	bucketName := "all-my-config"

	bucket := sourcev1.Bucket{
Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Timeout: &metav1.Duration{Duration: 1 * time.Hour}, + }, + } + + t.Run("fetches etag index", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) + client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) + client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) + + index := index.NewDigester() + err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(3)) + }) + + t.Run("an error while bucket does not exist", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: "other-bucket-name"} + + index := index.NewDigester() + err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("not found")) + }) + + t.Run("filters with .sourceignore rules", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`}) + client.addObject("foo.yaml", mockBucketObject{etag: "etag1", data: "foo.yaml"}) + client.addObject("foo.txt", mockBucketObject{etag: "etag2", data: "foo.txt"}) + + index := index.NewDigester() + err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(filepath.Join(tmp, ".sourceignore")); err != nil { + t.Error(err) + } + + if ok := index.Has("foo.txt"); ok { + t.Error(fmt.Errorf("expected 'foo.txt' index item to not exist")) + } + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) + }) + + t.Run("filters with ignore rules from object", func(t *testing.T) { + tmp := t.TempDir() + + 
client := mockBucketClient{bucketName: bucketName} + client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`}) + client.addObject("foo.txt", mockBucketObject{etag: "etag1", data: "foo.txt"}) + + ignore := "!*.txt" + bucket := bucket.DeepCopy() + bucket.Spec.Ignore = &ignore + + index := index.NewDigester() + err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(filepath.Join(tmp, ".sourceignore")); err != nil { + t.Error(err) + } + + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) + if ok := index.Has("foo.txt"); !ok { + t.Error(fmt.Errorf("expected 'foo.txt' index item to exist")) + } + }) +} + +func Test_fetchFiles(t *testing.T) { + bucketName := "all-my-config" + + bucket := sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Timeout: &metav1.Duration{Duration: 1 * time.Hour}, + }, + } + + t.Run("fetches files", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) + client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) + client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) + + index := client.objectsToDigestIndex() + + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + + for path := range index.Index() { + p := filepath.Join(tmp, path) + _, err := os.Stat(p) + if err != nil { + t.Error(err) + } + } + }) + + t.Run("an error while fetching returns an error for the whole procedure", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName, objects: map[string]mockBucketObject{}} + client.objects["error"] = mockBucketObject{} + + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToDigestIndex(), tmp) + if err == nil 
{ + t.Fatal("expected error but got nil") + } + }) + + t.Run("a changed etag updates the index", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag2"}) + + index := index.NewDigester() + index.Add("foo.yaml", "etag1") + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + f := index.Get("foo.yaml") + g := NewWithT(t) + g.Expect(f).To(Equal("etag2")) + }) + + t.Run("a disappeared index entry is removed from the index", func(t *testing.T) { + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) + + index := index.NewDigester() + index.Add("foo.yaml", "etag1") + // Does not exist on server + index.Add("bar.yaml", "etag2") + + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + f := index.Get("foo.yaml") + g := NewWithT(t) + g.Expect(f).To(Equal("etag1")) + g.Expect(index.Has("bar.yaml")).To(BeFalse()) + }) + + t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) { + // this will fail if, for example, the semaphore is not used correctly and blocks + tmp := t.TempDir() + + client := mockBucketClient{bucketName: bucketName} + for i := 0; i < 2*maxConcurrentBucketFetches; i++ { + f := fmt.Sprintf("file-%d", i) + client.addObject(f, mockBucketObject{etag: f, data: f}) + } + index := client.objectsToDigestIndex() + + err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) + if err != nil { + t.Fatal(err) + } + }) +} diff --git a/internal/controller/bucket_controller_test.go b/internal/controller/bucket_controller_test.go new file mode 100644 index 000000000..00ed46cb7 --- /dev/null +++ b/internal/controller/bucket_controller_test.go @@ -0,0 +1,2007 @@ +/* +Copyright 2021 The Flux authors 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/index" + gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" + s3mock "github.com/fluxcd/source-controller/internal/mock/s3" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" 
+) + +// Environment variable to set the GCP Storage host for the GCP client. +const EnvGcpStorageHost = "STORAGE_EMULATOR_HOST" + +func TestBucketReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "bucket-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + bucket := &sourcev1.Bucket{} + bucket.Name = "test-bucket" + bucket.Namespace = namespaceName + bucket.Spec = sourcev1.BucketSpec{ + Interval: metav1.Duration{Duration: interval}, + BucketName: "foo", + Endpoint: "bar", + } + // Add a test finalizer to prevent the object from getting deleted. + bucket.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, bucket)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, bucket)).NotTo(HaveOccurred()) + + r := &BucketReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(bucket)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := s3mock.NewServer("test-bucket") + s3Server.Objects = []*s3mock.Object{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + origObj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. 
+ condns := &conditionscheck.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + uo, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(uo) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.Bucket, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: 
"notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return 
err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + defer func() { + 
g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) + }() + + r := &BucketReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.Bucket{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, index, "") + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileSource_generic(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3mock.Object + middleware http.Handler + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + }{ + { + name: "Reconciles generic source", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "Observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': 
secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields 'accesskey' and 'secretkey'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = 
&meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{}, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, 
"foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + }, + }, + { + name: "Observes non-existing sts.secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data for 'ldap' STS provider: required fields username, password"), + }, + }, + { + 
name: "Observes non-existing sts.certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, + { + name: "Observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + 
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes incompatible sts.provider", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "aws", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidSTSConfigurationReason, "STS provider 'aws' is not supported for 'generic' bucket provider"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.endpoint", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "something\t", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "failed to parse STS endpoint 'something\t': parse \"something\\t\": net/url: invalid control character in URL"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + 
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj 
*sourcev1.Bucket) { + ignore := "!ignored/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3mock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + }, + }, + { + name: "Up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + 
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + bucketName: "dummy", + bucketObjects: []*s3mock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 
'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + tmpDir := t.TempDir() + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3mock.Server + if tt.bucketName != "" { + server = s3mock.NewServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, index, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + 
g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcsmock.Object + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + disableObjectLevelWorkloadIdentity bool + }{ + { + name: "Reconciles GCS source", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = 
&meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" 
+ conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 1, + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 4, + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + Generation: 3, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "!ignored/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcsmock.Object{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 1, + }, + { + Key: 
"ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + Generation: 2, + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + Generation: 4, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "ignored/file.txt": "f08907038338288420ae7dc2d30c0497", + "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"), + }, + }, + { + name: "Up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + Generation: 2, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + Generation: 2, + }, + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level Workload 
Identity (no secret)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Controller-Level Workload Identity (no secret, no SA)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + // ServiceAccountName は設定しない (Controller-Level) + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level fails when feature gate disabled", + 
bucketName: "dummy", + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + disableObjectLevelWorkloadIdentity: true, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + // Handle ObjectLevelWorkloadIdentity feature gate + if !tt.disableObjectLevelWorkloadIdentity { + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + } + + tmpDir := t.TempDir() + + // Test bucket object. 
+ obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: "gcp", + }, + } + + // Set up the mock GCP bucket server. + server := gcsmock.NewServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. + g.Expect(os.Setenv(EnvGcpStorageHost, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(EnvGcpStorageHost)).ToNot(HaveOccurred()) + }() + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + index := index.NewDigester() + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, index, tmpDir) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + + g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes ArtifactInStorage=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Up-to-date artifact should not persist and update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + revision := index.Digest(intdigest.Canonical) + obj.Spec.Interval = metav1.Duration{Duration: interval} + // Incomplete artifact + obj.Status.Artifact = &meta.Artifact{Revision: revision.String()} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + // Still incomplete + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, 
meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact: revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { + // Remove the given directory and create a file for the same + // path. 
+ t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + t.Expect(err).ToNot(HaveOccurred()) + t.Expect(f.Close()).ToNot(HaveOccurred()) + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "is not a directory"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.Bucket{}) + + r := &BucketReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + tmpDir := t.TempDir() + index := index.NewDigester() + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, index, tmpDir) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(context.TODO(), sp, obj, index, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. 
Check artifacts only on successful + // reconcile. + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestBucketReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.Bucket) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: "positive conditions only", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + 
name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + APIVersion: sourcev1.GroupVersion.String(), + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "foo", + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithObjects(obj). + WithStatusSubresource(&sourcev1.Bucket{}). + Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(bucketReadyCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_notify(t *testing.T) { + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.Bucket) + newObjBeforeFunc func(obj *sourcev1.Bucket) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + }, + wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored artifact with 2 fetched files from", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact 
= &meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + BucketName: "test-bucket", + }, + } + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &BucketReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), + } + index := index.NewDigester(index.WithIndex(map[string]string{ + "zzz": "qqq", + "bbb": "ddd", + })) + reconciler.notify(ctx, oldObj, newObj, index, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { + tests := []struct { + name string + bucketProvider string + stsConfig *sourcev1.BucketSTSSpec + err string + }{ + { + name: "gcp unsupported", + bucketProvider: "gcp", + stsConfig: &sourcev1.BucketSTSSpec{ + 
Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "azure unsupported", + bucketProvider: "azure", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "aws supported", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + }, + { + name: "invalid endpoint", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "test", + }, + err: "spec.sts.endpoint in body should match '^(http|https)://.*$'", + }, + { + name: "gcp can be created without STS config", + bucketProvider: "gcp", + }, + { + name: "azure can be created without STS config", + bucketProvider: "azure", + }, + { + name: "generic can be created without STS config", + bucketProvider: "generic", + }, + { + name: "aws can be created without STS config", + bucketProvider: "aws", + }, + { + name: "ldap unsupported for aws", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + err: "'aws' is the only supported STS provider for the 'aws' Bucket provider", + }, + { + name: "aws unsupported for generic", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "'ldap' is the only supported STS provider for the 'generic' Bucket provider", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: 
"aws", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may not use a secret or cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: tt.bucketProvider, + BucketName: "test", + Endpoint: "test", + Suspend: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + STS: tt.stsConfig, + }, + } + + err := testEnv.Create(ctx, obj) + if err == nil { + defer func() { + err := testEnv.Delete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + }() + } + + if tt.err != "" { + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go new file mode 100644 index 000000000..d9dcf88c1 --- /dev/null +++ b/internal/controller/common_test.go @@ -0,0 +1,146 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + "github.com/fluxcd/source-controller/internal/object" +) + +// waitForSourceDeletion is a generic test helper to wait for object deletion of +// any source kind. +func waitForSourceDeletion(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +// waitForSuspended is a generic test helper to wait for object to be suspended +// of any source kind. +func waitForSuspended(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + suspended, err := object.GetSuspend(obj) + if err != nil { + return false + } + return suspended == true + }, timeout).Should(BeTrue()) +} + +// waitForSourceReadyWithArtifact is a generic test helper to wait for an object +// to be ready of any source kind that have artifact in status when ready. 
+func waitForSourceReadyWithArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + waitForSourceReady(ctx, g, obj, true) +} + +// waitForSourceReadyWithoutArtifact is a generic test helper to wait for an object +// to be ready of any source kind that don't have artifact in status when ready. +func waitForSourceReadyWithoutArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + waitForSourceReady(ctx, g, obj, false) +} + +// waitForSourceReady is a generic test helper to wait for an object to be +// ready of any source kind. +func waitForSourceReady(ctx context.Context, g *WithT, obj conditions.Setter, withArtifact bool) { + g.THelper() + + key := client.ObjectKeyFromObject(obj) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if withArtifact { + artifact, err := object.GetArtifact(obj) + if err != nil { + return false + } + if artifact == nil { + return false + } + } + if !conditions.IsReady(obj) { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + statusObservedGen, err := object.GetStatusObservedGeneration(obj) + if err != nil { + return false + } + return obj.GetGeneration() == readyCondition.ObservedGeneration && + obj.GetGeneration() == statusObservedGen + }, timeout).Should(BeTrue()) +} + +// testSuspendedObjectDeleteWithArtifact is a generic test helper to test if a +// suspended object can be deleted for objects that have artifact in status when +// ready. +func testSuspendedObjectDeleteWithArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + testSuspendedObjectDelete(ctx, g, obj, true) +} + +// testSuspendedObjectDeleteWithoutArtifact is a generic test helper to test if +// a suspended object can be deleted for objects that don't have artifact in +// status when ready. 
+func testSuspendedObjectDeleteWithoutArtifact(ctx context.Context, g *WithT, obj conditions.Setter) { + g.THelper() + testSuspendedObjectDelete(ctx, g, obj, false) +} + +// testSuspendedObjectDelete is a generic test helper to test if a suspended +// object can be deleted. +func testSuspendedObjectDelete(ctx context.Context, g *WithT, obj conditions.Setter, withArtifact bool) { + g.THelper() + + // Create the object and wait for it to be ready. + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + waitForSourceReady(ctx, g, obj, withArtifact) + + // Suspend the object. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(object.SetSuspend(obj, true)).ToNot(HaveOccurred()) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + waitForSuspended(ctx, g, obj) + + // Delete the object. + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + waitForSourceDeletion(ctx, g, obj) +} diff --git a/internal/controller/gitrepository_controller.go b/internal/controller/gitrepository_controller.go new file mode 100644 index 000000000..1208c8ae0 --- /dev/null +++ b/internal/controller/gitrepository_controller.go @@ -0,0 +1,1361 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + "github.com/fluxcd/pkg/git/github" + "github.com/fluxcd/pkg/runtime/logger" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/go-git/go-git/v5/plumbing/transport" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/git/gogit" + "github.com/fluxcd/pkg/git/repository" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror 
"github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/features" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" +) + +// gitRepositoryReadyCondition contains the information required to summarize a +// v1.GitRepository Ready Condition. +var gitRepositoryReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// gitRepositoryFailConditions contains the conditions that represent a failure. +var gitRepositoryFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.StorageOperationFailedCondition, +} + +// getPatchOptions composes patch options based on the given parameters. +// It is used as the options used when patching an object. 
+func getPatchOptions(ownedConditions []string, controllerName string) []patch.Option { + return []patch.Option{ + patch.WithOwnedConditions{Conditions: ownedConditions}, + patch.WithFieldOwner(controllerName), + } +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// GitRepositoryReconciler reconciles a v1.GitRepository object. +type GitRepositoryReconciler struct { + client.Client + kuberecorder.EventRecorder + helper.Metrics + + Storage *storage.Storage + ControllerName string + TokenCache *cache.TokenCache + + requeueDependency time.Duration + features map[string]bool + + patchOptions []patch.Option +} + +type GitRepositoryReconcilerOptions struct { + DependencyRequeueInterval time.Duration + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// gitRepositoryReconcileFunc is the function type for all the +// v1.GitRepository (sub)reconcile functions. 
+type gitRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) + +func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) +} + +func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error { + r.patchOptions = getPatchOptions(gitRepositoryReadyCondition.Owned, r.ControllerName) + + r.requeueDependency = opts.DependencyRequeueInterval + + if r.features == nil { + r.features = features.FeatureGates() + } + + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.GitRepository{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), + )). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }). + Complete(r) +} + +func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + start := time.Now() + log := ctrl.LoggerFrom(ctx) + + // Fetch the GitRepository + obj := &sourcev1.GitRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the patch helper with the current version of the object. + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. 
+ defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(gitRepositoryReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.ErrorActionHandler, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record duration metrics. + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition + // between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil + return + } + + // Reconcile actual object + reconcilers := []gitRepositoryReconcileFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileInclude, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) + return +} + +// reconcile iterates through the gitRepositoryReconcileFunc tasks for the +// object. 
It returns early on the first call that returns +// reconcile.ResultRequeue, or produces an error. +func (r *GitRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.GitRepository, reconcilers []gitRepositoryReconcileFunc) (sreconcile.Result, error) { + oldObj := obj.DeepCopy() + + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var recAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + recAtVal = v + } + + // Persist reconciling if generation differs or reconciliation is requested. + switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case recAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + + // Create temp dir for Git clone + tmpDir, err := util.TempDirForObj("", obj) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create temporary working directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + defer func() { + if err = os.RemoveAll(tmpDir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory") + } + }() + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + + // Run the sub-reconcilers and build the result of reconciliation. 
+ var ( + commit git.Commit + includes artifactSet + + res sreconcile.Result + resErr error + ) + for _, rec := range reconcilers { + recResult, err := rec(ctx, sp, obj, &commit, &includes, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + + r.notify(ctx, oldObj, obj, commit, res, resErr) + + return res, resErr +} + +// notify emits notification related to the result of reconciliation. +func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) { + // Notify successful reconciliation for new artifact, no-op reconciliation + // and recovery from any failure. + if r.shouldNotify(oldObj, newObj, res, resErr) { + annotations := map[string]string{ + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, + } + + // A partial commit due to no-op clone doesn't contain the commit + // message information. Have separate message for it. + var message string + if git.IsConcreteCommit(commit) { + message = fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage()) + } else { + message = fmt.Sprintf("stored artifact for commit '%s'", commitReference(newObj, &commit)) + } + + // Notify on new artifact and failure recovery. 
+ if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + "NewArtifact", message) + ctrl.LoggerFrom(ctx).Info(message) + } else { + if sreconcile.FailureRecovery(oldObj, newObj, gitRepositoryFailConditions) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + meta.SucceededReason, message) + ctrl.LoggerFrom(ctx).Info(message) + } + } + } +} + +// shouldNotify analyzes the result of subreconcilers and determines if a +// notification should be sent. It decides about the final informational +// notifications after the reconciliation. Failure notification and in-line +// notifications are not handled here. +func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepository, res sreconcile.Result, resErr error) bool { + // Notify for successful reconciliation. + if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { + return true + } + // Notify for no-op reconciliation with ignore error. + if resErr != nil && res == sreconcile.ResultEmpty && newObj.Status.Artifact != nil { + // Convert to Generic error and check for ignore. + if ge, ok := resErr.(*serror.Generic); ok { + return ge.Ignore + } + } + return false +} + +// reconcileStorage ensures the current state of the storage matches the +// desired and previously observed state. +// +// The garbage collection is executed based on the flag configured settings and +// may remove files that are beyond their TTL or the maximum number of files +// to survive a collection cycle. +// If the Artifact in the Status of the object disappeared from the Storage, +// it is removed from the object. +// If the object does not have an Artifact in its Status, a Reconciling +// condition is added. +// The hostname of the Artifact in the Status of the object is updated, to +// ensure it matches the Storage server hostname of current runtime. 
+func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + } + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) + conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + + return sreconcile.ResultSuccess, nil +} + +// reconcileSource ensures the upstream Git repository and reference can be +// 
cloned and checked out using the specified configuration, and observes its
+// state. It also checks if the included repositories are available for use.
+//
+// The included repositories are fetched and their metadata is stored. In case
+// one of the included repositories isn't ready, it records
+// v1.IncludeUnavailableCondition=True and returns early. When all the
+// included repositories are ready, it removes
+// v1.IncludeUnavailableCondition from the object.
+// When the included artifactSet differs from the current set in the Status of
+// the object, it marks the object with v1.ArtifactOutdatedCondition=True.
+// The repository is cloned to the given dir, using the specified configuration
+// to check out the reference. In case of an error during this process
+// (including transient errors), it records v1.FetchFailedCondition=True
+// and returns early.
+// On a successful checkout, it removes v1.FetchFailedCondition and
+// compares the current revision of HEAD to the revision of the Artifact in the
+// Status of the object. It records v1.ArtifactOutdatedCondition=True when
+// they differ.
+// If specified, the signature of the Git commit is verified. If the signature
+// cannot be verified or the verification fails, it records
+// v1.SourceVerifiedCondition=False and returns early. When successful,
+// it records v1.SourceVerifiedCondition=True.
+// When all the above is successful, the given Commit pointer is set to the
+// commit of the checked out Git repository.
+//
+// If the optimized git clone feature is enabled, it checks if the remote repo
+// and the local artifact are on the same revision, and no other source content
+// related configurations have changed since last reconciliation. If there's no
+// change, it short-circuits the whole reconciliation with an early return.
+func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + // Remove previously failed source verification status conditions. The + // failing verification should be recalculated. But an existing successful + // verification need not be removed as it indicates verification of previous + // version. + if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } + + var proxyOpts *transport.ProxyOptions + var proxyURL *url.URL + if obj.Spec.ProxySecretRef != nil { + var err error + secretRef := types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to configure proxy options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e + } + proxyOpts = &transport.ProxyOptions{URL: proxyURL.String()} + } + + u, err := url.Parse(obj.Spec.URL) + if err != nil { + e := serror.NewStalling( + fmt.Errorf("failed to parse url '%s': %w", obj.Spec.URL, err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + authOpts, err := r.getAuthOpts(ctx, obj, *u, proxyURL) + if err != nil { + // Return error as the world as observed may change + return sreconcile.ResultEmpty, err + } + + // Fetch the included artifact metadata. 
+ artifacts, err := r.fetchIncludes(ctx, obj) + if err != nil { + return sreconcile.ResultEmpty, err + } + + // Observe if the artifacts still match the previous included ones + if artifacts.Diff(obj.Status.IncludedArtifacts) { + message := "included artifacts differ from last observed includes" + if obj.Status.IncludedArtifacts != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + + // Persist the ArtifactSet. + *includes = *artifacts + + c, err := r.gitCheckout(ctx, obj, authOpts, proxyOpts, dir, true) + if err != nil { + return sreconcile.ResultEmpty, err + } + if c == nil { + e := serror.NewGeneric( + fmt.Errorf("git repository is empty"), + "EmptyGitRepository", + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Assign the commit to the shared commit reference. + *commit = *c + + // If it's a partial commit obtained from an existing artifact, check if the + // reconciliation can be skipped if other configurations have not changed. + if !git.IsConcreteCommit(*commit) { + // Check if the content config contributing to the artifact has changed. + if !gitContentConfigChanged(obj, includes) { + ge := serror.NewGeneric( + fmt.Errorf("no changes since last reconcilation: observed revision '%s'", + commitReference(obj, commit)), sourcev1.GitOperationSucceedReason, + ) + ge.Notification = false + ge.Ignore = true + // Log it as this will not be passed to the runtime. + ge.Log = true + ge.Event = corev1.EventTypeNormal + // Remove any stale fetch failed condition. 
+ conditions.Delete(obj, sourcev1.FetchFailedCondition) + // IMPORTANT: This must be set to ensure that the observed + // generation of this condition is updated. In case of full + // reconciliation reconcileArtifact() ensures that it's set at the + // very end. + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, + "stored artifact for revision '%s'", commitReference(obj, commit)) + // TODO: Find out if such condition setting is needed when commit + // signature verification is enabled. + return sreconcile.ResultEmpty, ge + } + + // If we can't skip the reconciliation, checkout again without any + // optimization. + c, err := r.gitCheckout(ctx, obj, authOpts, proxyOpts, dir, false) + if err != nil { + return sreconcile.ResultEmpty, err + } + *commit = *c + } + ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commitReference(obj, commit)) + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Validate sparse checkout paths after successful checkout. 
+ if err := r.validateSparseCheckoutPaths(ctx, obj, dir); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to sparse checkout directories : %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Verify commit signature + if result, err := r.verifySignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { + return result, err + } + + // Mark observations about the revision on the object + if !obj.GetArtifact().HasRevision(commitReference(obj, commit)) { + message := fmt.Sprintf("new upstream revision '%s'", commitReference(obj, commit)) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + return sreconcile.ResultSuccess, nil +} + +// getAuthOpts fetches the secret containing the auth options (if specified), +// constructs a git.AuthOptions object using those options along with the provided +// URL and returns it. 
+func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository, + u url.URL, proxyURL *url.URL) (*git.AuthOptions, error) { + var secret *corev1.Secret + var authData map[string][]byte + if obj.Spec.SecretRef != nil { + var err error + secret, err = r.getSecret(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace()) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + authData = secret.Data + } + + // Configure authentication strategy to access the source + opts, err := git.NewAuthOptions(u, authData) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to configure authentication options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + + // Configure provider authentication if specified. + var getCreds func() (*authutils.GitCredentials, error) + switch provider := obj.GetProvider(); provider { + case sourcev1.GitProviderAzure: // If AWS or GCP are added in the future they can be added here separated by a comma. + getCreds = func() (*authutils.GitCredentials, error) { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. 
+ if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := serror.NewStalling(fmt.Errorf(msgFmt, gate), meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "%s", err) + return nil, err + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.GitRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } + + return authutils.GetGitCredentials(ctx, provider, opts...) 
+ } + case sourcev1.GitProviderGitHub: + // if provider is github, but secret ref is not specified + if obj.Spec.SecretRef == nil { + e := serror.NewStalling( + fmt.Errorf("secretRef with github app data must be specified when provider is set to github"), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + authMethods, err := secrets.AuthMethodsFromSecret(ctx, secret, secrets.WithTLSSystemCertPool()) + if err != nil { + return nil, err + } + if !authMethods.HasGitHubAppData() { + e := serror.NewGeneric( + fmt.Errorf("secretRef with github app data must be specified when provider is set to github"), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + getCreds = func() (*authutils.GitCredentials, error) { + var appOpts []github.OptFunc + + appOpts = append(appOpts, github.WithAppData(authMethods.GitHubAppData)) + + if proxyURL != nil { + appOpts = append(appOpts, github.WithProxyURL(proxyURL)) + } + + if r.TokenCache != nil { + appOpts = append(appOpts, github.WithCache(r.TokenCache, sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile)) + } + + if authMethods.HasTLS() { + appOpts = append(appOpts, github.WithTLSConfig(authMethods.TLS)) + } + + username, password, err := github.GetCredentials(ctx, appOpts...) + if err != nil { + return nil, err + } + return &authutils.GitCredentials{ + Username: username, + Password: password, + }, nil + } + default: + // analyze secret, if it has github app data, perhaps provider should have been github. 
+ if appID := authData[github.KeyAppID]; len(appID) != 0 { + e := serror.NewGeneric( + fmt.Errorf("secretRef '%s/%s' has github app data but provider is not set to github", obj.GetNamespace(), obj.Spec.SecretRef.Name), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + } + if getCreds != nil { + creds, err := getCreds() + if err != nil { + // Check if it's already a structured error and preserve it + switch err.(type) { + case *serror.Stalling, *serror.Generic: + return nil, err + } + + e := serror.NewGeneric( + fmt.Errorf("failed to configure authentication options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + opts.BearerToken = creds.BearerToken + opts.Username = creds.Username + opts.Password = creds.Password + } + return opts, nil +} + +func (r *GitRepositoryReconciler) getSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, key, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s/%s': %w", namespace, name, err) + } + return secret, nil +} + +// reconcileArtifact archives a new Artifact to the Storage, if the current +// (Status) data on the object does not match the given. +// +// The inspection of the given data to the object is differed, ensuring any +// stale observations like v1.ArtifactOutdatedCondition are removed. +// If the given Artifact and/or artifactSet (includes) and observed artifact +// content config do not differ from the object's current, it returns early. +// Source ignore patterns are loaded, and the given directory is archived while +// taking these patterns into account. 
// On a successful archive, the Artifact, Includes, observed ignore, recurse
// submodules and observed include in the Status of the object are set.
func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {

	// Create potential new artifact with current available metadata
	artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commitReference(obj, commit), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))

	// Set the ArtifactInStorageCondition if there's no drift.
	// Deferred so that the condition is evaluated after any status mutations
	// performed by the body below.
	defer func() {
		if curArtifact := obj.GetArtifact(); curArtifact.HasRevision(artifact.Revision) &&
			!includes.Diff(obj.Status.IncludedArtifacts) &&
			!gitContentConfigChanged(obj, includes) {
			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
				"stored artifact for revision '%s'", curArtifact.Revision)
		}
	}()

	// The artifact is up-to-date
	if curArtifact := obj.GetArtifact(); curArtifact.HasRevision(artifact.Revision) &&
		!includes.Diff(obj.Status.IncludedArtifacts) &&
		!gitContentConfigChanged(obj, includes) {
		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", curArtifact.Revision)
		return sreconcile.ResultSuccess, nil
	}

	// Ensure target path exists and is a directory
	if f, err := os.Stat(dir); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to stat target artifact path: %w", err),
			sourcev1.StatOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	} else if !f.IsDir() {
		e := serror.NewGeneric(
			fmt.Errorf("invalid target path: '%s' is not a directory", dir),
			sourcev1.InvalidPathReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Ensure artifact directory exists and acquire lock
	if err := r.Storage.MkdirAll(artifact); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create artifact directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	unlock, err := r.Storage.Lock(artifact)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to acquire lock for artifact: %w", err),
			meta.FailedReason,
		)
	}
	defer unlock()

	// Load ignore rules for archiving
	ignoreDomain := strings.Split(dir, string(filepath.Separator))
	ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
			"SourceIgnoreError",
		)
	}
	// In-spec ignore rules are appended after repository rules, so they take
	// precedence in pattern matching.
	if obj.Spec.Ignore != nil {
		ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
	}

	// Archive directory to storage
	if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("unable to archive artifact to storage: %w", err),
			sourcev1.ArchiveOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Record the observations on the object.
	obj.Status.Artifact = artifact.DeepCopy()
	obj.Status.IncludedArtifacts = *includes
	obj.Status.ObservedIgnore = obj.Spec.Ignore
	obj.Status.ObservedRecurseSubmodules = obj.Spec.RecurseSubmodules
	obj.Status.ObservedInclude = obj.Spec.Include
	obj.Status.ObservedSparseCheckout = obj.Spec.SparseCheckout

	// Remove the deprecated symlink.
+ // TODO(hidde): remove 2 minor versions from introduction of v1. + symArtifact := artifact.DeepCopy() + symArtifact.Path = filepath.Join(filepath.Dir(symArtifact.Path), "latest.tar.gz") + if fi, err := os.Lstat(r.Storage.LocalPath(artifact)); err == nil { + if fi.Mode()&os.ModeSymlink != 0 { + if err := os.Remove(r.Storage.LocalPath(*symArtifact)); err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + "failed to remove (deprecated) symlink: %s", err) + } + } + } + + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// reconcileInclude reconciles the on the object specified +// v1.GitRepositoryInclude list by copying their Artifact (sub)contents to +// the specified paths in the given directory. +// +// When one of the includes is unavailable, it marks the object with +// v1.IncludeUnavailableCondition=True and returns early. +// When the copy operations are successful, it removes the +// v1.IncludeUnavailableCondition from the object. +// When the composed artifactSet differs from the current set in the Status of +// the object, it marks the object with v1.ArtifactOutdatedCondition=True. +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + + for i, incl := range obj.Spec.Include { + // Do this first as it is much cheaper than copy operations + toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err), + "IllegalPath", + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Get artifact at the same include index. 
The artifactSet is created + // such that the index of artifactSet matches with the index of Include. + // Hence, index is used here to pick the associated artifact from + // includes. + var artifact *meta.Artifact + for j, art := range *includes { + if i == j { + artifact = art + } + } + + // Copy artifact (sub)contents to configured directory. + if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), + "CopyFailure", + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) + return sreconcile.ResultSuccess, nil +} + +// gitCheckout builds checkout options with the given configurations and +// performs a git checkout. +func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1.GitRepository, + authOpts *git.AuthOptions, proxyOpts *transport.ProxyOptions, dir string, optimized bool) (*git.Commit, error) { + + // Configure checkout strategy. + cloneOpts := repository.CloneConfig{ + RecurseSubmodules: obj.Spec.RecurseSubmodules, + ShallowClone: true, + } + if ref := obj.Spec.Reference; ref != nil { + cloneOpts.Branch = ref.Branch + cloneOpts.Commit = ref.Commit + cloneOpts.Tag = ref.Tag + cloneOpts.SemVer = ref.SemVer + cloneOpts.RefName = ref.Name + } + if obj.Spec.SparseCheckout != nil { + // Trim any leading "./" in the directory paths since underlying go-git API does not honor them. 
+ sparseCheckoutDirs := make([]string, len(obj.Spec.SparseCheckout)) + for i, path := range obj.Spec.SparseCheckout { + sparseCheckoutDirs[i] = strings.TrimPrefix(path, "./") + } + cloneOpts.SparseCheckoutDirectories = sparseCheckoutDirs + } + // Only if the object has an existing artifact in storage, attempt to + // short-circuit clone operation. reconcileStorage has already verified + // that the artifact exists. + if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) { + if artifact := obj.GetArtifact(); artifact != nil { + cloneOpts.LastObservedCommit = artifact.Revision + } + } + + gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + + clientOpts := []gogit.ClientOption{gogit.WithDiskStorage()} + if authOpts.Transport == git.HTTP { + clientOpts = append(clientOpts, gogit.WithInsecureCredentialsOverHTTP()) + } + if proxyOpts != nil { + clientOpts = append(clientOpts, gogit.WithProxy(*proxyOpts)) + } + + gitReader, err := gogit.NewClient(dir, authOpts, clientOpts...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create Git client: %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + defer gitReader.Close() + + commit, err := gitReader.Clone(gitCtx, obj.Spec.URL, cloneOpts) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to checkout and determine revision: %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + + return commit, nil +} + +// fetchIncludes fetches artifact metadata of all the included repos. +func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) { + artifacts := make(artifactSet, len(obj.Spec.Include)) + for i, incl := range obj.Spec.Include { + // Retrieve the included GitRepository. 
+ dep := &sourcev1.GitRepository{} + if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { + e := serror.NewWaiting( + fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err), + "NotFound", + ) + e.RequeueAfter = r.requeueDependency + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e) + return nil, e + } + + // Confirm include has an artifact + if dep.GetArtifact() == nil { + e := serror.NewWaiting( + fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name), + "NoArtifact", + ) + e.RequeueAfter = r.requeueDependency + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e) + return nil, e + } + + artifacts[i] = dep.GetArtifact().DeepCopy() + } + + // We now know all the includes are available. + conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) + + return &artifacts, nil +} + +// verifySignature verifies the signature of the given Git commit and/or its referencing tag +// depending on the verification mode specified on the object. +// If the signature can not be verified or the verification fails, it records +// v1.SourceVerifiedCondition=False and returns. +// When successful, it records v1.SourceVerifiedCondition=True. +// If no verification mode is specified on the object, the +// v1.SourceVerifiedCondition Condition is removed. 
+func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { + // Check if there is a commit verification is configured and remove any old + // observations if there is none + if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { + obj.Status.SourceVerificationMode = nil + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + return sreconcile.ResultSuccess, nil + } + + // Get secret with GPG data + publicKeySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: obj.Spec.Verification.SecretRef.Name, + } + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { + e := serror.NewGeneric( + fmt.Errorf("PGP public keys secret error: %w", err), + "VerificationError", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + var keyRings []string + for _, v := range secret.Data { + keyRings = append(keyRings, string(v)) + } + + var message strings.Builder + if obj.Spec.Verification.VerifyTag() { + // If we need to verify a tag object, then the commit must have a tag + // that points to it. If it does not, then its safe to asssume that + // the checkout didn't happen via a tag reference, thus the object can + // be marked as stalled. + tag := commit.ReferencingTag + if tag == nil { + err := serror.NewStalling( + errors.New("cannot verify tag object's signature if a tag reference is not specified"), + "InvalidVerificationMode", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) + return sreconcile.ResultEmpty, err + } + if !git.IsSignedTag(*tag) { + // If the tag was not signed then we can't verify its signature + // but since the upstream tag object can change at any time, we can't + // mark the object as stalled. 
+ err := serror.NewGeneric( + fmt.Errorf("cannot verify signature of tag '%s' since it is not signed", commit.ReferencingTag.String()), + "InvalidGitObject", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) + return sreconcile.ResultEmpty, err + } + + // Verify tag with GPG data from secret + tagEntity, err := tag.Verify(keyRings...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("signature verification of tag '%s' failed: %w", tag.String(), err), + "InvalidTagSignature", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + // Return error in the hope the secret changes + return sreconcile.ResultEmpty, e + } + + message.WriteString(fmt.Sprintf("verified signature of\n\t- tag '%s' with key '%s'", tag.String(), tagEntity)) + } + + if obj.Spec.Verification.VerifyHEAD() { + // Verify commit with GPG data from secret + headEntity, err := commit.Verify(keyRings...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), + "InvalidCommitSignature", + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + // Return error in the hope the secret changes + return sreconcile.ResultEmpty, e + } + // If we also verified the tag previously, then append to the message. 
+ if message.Len() > 0 { + message.WriteString(fmt.Sprintf("\n\t- commit '%s' with key '%s'", commit.Hash.String(), headEntity)) + } else { + message.WriteString(fmt.Sprintf("verified signature of\n\t- commit '%s' with key '%s'", commit.Hash.String(), headEntity)) + } + } + + reason := meta.SucceededReason + mode := obj.Spec.Verification.GetMode() + obj.Status.SourceVerificationMode = &mode + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, reason, "%s", message.String()) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, "%s", message.String()) + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. +// Removing the finalizer from the object if successful. +func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. +// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. 
+func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// gitContentConfigChanged evaluates the current spec with the observations of +// the artifact in the status to determine if artifact content configuration has +// changed and requires rebuilding the artifact. 
Rebuilding the artifact is also +// required if the object needs to be (re)verified. +func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) bool { + if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { + return true + } + if obj.Spec.RecurseSubmodules != obj.Status.ObservedRecurseSubmodules { + return true + } + if len(obj.Spec.Include) != len(obj.Status.ObservedInclude) { + return true + } + if requiresVerification(obj) { + return true + } + if len(obj.Spec.SparseCheckout) != len(obj.Status.ObservedSparseCheckout) { + return true + } + for index, dir := range obj.Spec.SparseCheckout { + if dir != obj.Status.ObservedSparseCheckout[index] { + return true + } + } + + // Convert artifactSet to index addressable artifacts and ensure that it and + // the included artifacts include all the include from the spec. + artifacts := []*meta.Artifact(*includes) + if len(obj.Spec.Include) != len(artifacts) { + return true + } + if len(obj.Spec.Include) != len(obj.Status.IncludedArtifacts) { + return true + } + + // The order of spec.include, status.IncludeArtifacts and + // status.observedInclude are the same. Compare the values by index. + for index, incl := range obj.Spec.Include { + observedIncl := obj.Status.ObservedInclude[index] + observedInclArtifact := obj.Status.IncludedArtifacts[index] + currentIncl := artifacts[index] + + // Check if include is the same in spec and status. + if !gitRepositoryIncludeEqual(incl, observedIncl) { + return true + } + + // Check if the included repositories are still the same. + if !observedInclArtifact.HasRevision(currentIncl.Revision) { + return true + } + if !observedInclArtifact.HasDigest(currentIncl.Digest) { + return true + } + } + return false +} + +// validateSparseCheckoutPaths checks if the sparse checkout paths exist in the cloned repository. 
+func (r *GitRepositoryReconciler) validateSparseCheckoutPaths(ctx context.Context, obj *sourcev1.GitRepository, dir string) error { + if obj.Spec.SparseCheckout != nil { + for _, path := range obj.Spec.SparseCheckout { + fullPath := filepath.Join(dir, path) + if _, err := os.Lstat(fullPath); err != nil { + return fmt.Errorf("sparse checkout dir '%s' does not exist in repository: %w", path, err) + } + } + } + return nil +} + +// Returns true if both GitRepositoryIncludes are equal. +func gitRepositoryIncludeEqual(a, b sourcev1.GitRepositoryInclude) bool { + if a.GitRepositoryRef != b.GitRepositoryRef { + return false + } + if a.FromPath != b.FromPath { + return false + } + if a.ToPath != b.ToPath { + return false + } + return true +} + +func commitReference(obj *sourcev1.GitRepository, commit *git.Commit) string { + if obj.Spec.Reference != nil && obj.Spec.Reference.Name != "" { + return commit.AbsoluteReference() + } + return commit.String() +} + +// requiresVerification inspects a GitRepository's verification spec and its status +// to determine whether the Git repository needs to be verified again. It does so by +// first checking if the GitRepository has a verification spec. If it does, then +// it returns true based on the following three conditions: +// +// - If the object does not have a observed verification mode in its status. +// - If the observed verification mode indicates that only the tag had been +// verified earlier and the HEAD also needs to be verified now. +// - If the observed verification mode indicates that only the HEAD had been +// verified earlier and the tag also needs to be verified now. 
+func requiresVerification(obj *sourcev1.GitRepository) bool { + if obj.Spec.Verification != nil { + observedMode := obj.Status.SourceVerificationMode + mode := obj.Spec.Verification.GetMode() + if observedMode == nil { + return true + } + if (*observedMode == sourcev1.ModeGitTag && (mode == sourcev1.ModeGitHEAD || mode == sourcev1.ModeGitTagAndHEAD)) || + (*observedMode == sourcev1.ModeGitHEAD && (mode == sourcev1.ModeGitTag || mode == sourcev1.ModeGitTagAndHEAD)) { + return true + } + } + return false +} diff --git a/internal/controller/gitrepository_controller_fuzz_test.go b/internal/controller/gitrepository_controller_fuzz_test.go new file mode 100644 index 000000000..c9c136820 --- /dev/null +++ b/internal/controller/gitrepository_controller_fuzz_test.go @@ -0,0 +1,535 @@ +//go:build gofuzz_libfuzzer +// +build gofuzz_libfuzzer + +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "crypto/tls" + "crypto/x509" + "embed" + "errors" + "fmt" + "io/fs" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/memfs" + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + gitclient "github.com/go-git/go-git/v5/plumbing/transport/client" + httptransport "github.com/go-git/go-git/v5/plumbing/transport/http" + "github.com/go-git/go-git/v5/storage/memory" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" + + intstorage "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/gittestserver" + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +var ( + noOfCreatedFiles = 0 + interval = time.Millisecond * 10 + indexInterval = time.Millisecond * 10 + pullInterval = time.Second * 3 + initter sync.Once + gitServer *gittestserver.GitServer + k8sClient client.Client + cfg *rest.Config + testEnv *testenv.Environment + + storage *intstorage.Storage + + examplePublicKey []byte + examplePrivateKey []byte + exampleCA []byte +) + +//go:embed testdata/crd/*.yaml +//go:embed 
testdata/certs/*
var testFiles embed.FS

const (
	defaultBinVersion     = "1.24"
	lettersAndNumbers     = "abcdefghijklmnopqrstuvwxyz123456789"
	lettersNumbersAndDash = "abcdefghijklmnopqrstuvwxyz123456789-"
)

// FuzzRandomGitFiles implements a fuzzer that
// targets the GitRepository reconciler with a randomly generated Git tree.
func FuzzRandomGitFiles(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		initter.Do(func() {
			utilruntime.Must(ensureDependencies())
		})

		// NOTE(review): the consumer previously shadowed the *testing.F
		// parameter 'f'; renamed to fc for clarity.
		fc := fuzz.NewConsumer(data)
		namespace, deleteNamespace, err := createNamespace(fc)
		if err != nil {
			return
		}
		defer deleteNamespace()

		gitServerURL, stopGitServer := createGitServer(fc)
		defer stopGitServer()

		fs := memfs.New()
		gitrepo, err := git.Init(memory.NewStorage(), fs)
		if err != nil {
			panic(err)
		}
		wt, err := gitrepo.Worktree()
		if err != nil {
			panic(err)
		}

		// Create random files for the git source
		err = createRandomFiles(fc, fs, wt)
		if err != nil {
			return
		}

		commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String())
		if err != nil {
			return
		}
		created, err := createGitRepository(fc, gitServerURL.String(), commit.String(), namespace.Name)
		if err != nil {
			return
		}
		err = k8sClient.Create(context.Background(), created)
		if err != nil {
			return
		}
		defer k8sClient.Delete(context.Background(), created)

		// Let the reconciler do its thing:
		time.Sleep(60 * time.Millisecond)
	})
}

// FuzzGitResourceObject implements a fuzzer that targets
// the GitRepository reconciler with a fuzz-generated resource object.
func FuzzGitResourceObject(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		initter.Do(func() {
			utilruntime.Must(ensureDependencies())
		})

		fc := fuzz.NewConsumer(data)

		// Create this early because if it fails, then the fuzzer
		// does not need to proceed.
		repository := &sourcev1.GitRepository{}
		err := fc.GenerateStruct(repository)
		if err != nil {
			return
		}

		metaName, err := fc.GetStringFrom(lettersNumbersAndDash, 59)
		if err != nil {
			return
		}

		gitServerURL, stopGitServer := createGitServer(fc)
		defer stopGitServer()

		fs := memfs.New()
		gitrepo, err := git.Init(memory.NewStorage(), fs)
		if err != nil {
			return
		}
		wt, err := gitrepo.Worktree()
		if err != nil {
			return
		}

		// Add a file
		ff, _ := fs.Create("fixture")
		_ = ff.Close()
		_, err = wt.Add(fs.Join("fixture"))
		if err != nil {
			return
		}

		commit, err := pushFilesToGit(gitrepo, wt, gitServerURL.String())
		if err != nil {
			return
		}

		namespace, deleteNamespace, err := createNamespace(fc)
		if err != nil {
			return
		}
		defer deleteNamespace()

		repository.Spec.URL = gitServerURL.String()
		// BUG FIX: GenerateStruct may leave Verification nil, in which case
		// assigning Mode below would panic the fuzzer.
		if repository.Spec.Verification == nil {
			repository.Spec.Verification = &sourcev1.GitRepositoryVerification{}
		}
		repository.Spec.Verification.Mode = "head"
		repository.Spec.SecretRef = nil

		// Previously written as strings.Replace(reference.Commit, "", ..., 1)
		// on an empty string, which is just an assignment.
		reference := &sourcev1.GitRepositoryRef{
			Branch: "some-branch",
			Commit: commit.String(),
		}
		repository.Spec.Reference = reference

		repository.ObjectMeta = metav1.ObjectMeta{
			Name:      metaName,
			Namespace: namespace.Name,
		}
		err = k8sClient.Create(context.Background(), repository)
		if err != nil {
			return
		}
		defer k8sClient.Delete(context.Background(), repository)

		// Let the reconciler do its thing.
		time.Sleep(50 * time.Millisecond)
	})
}

// loadExampleKeys reads the TLS fixtures from testdata into the
// package-level key/certificate variables.
func loadExampleKeys() (err error) {
	examplePublicKey, err = os.ReadFile("testdata/certs/server.pem")
	if err != nil {
		return err
	}
	examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem")
	if err != nil {
		return err
	}
	exampleCA, err = os.ReadFile("testdata/certs/ca.pem")
	return err
}

// createGitRepository is a helper function to create GitRepository objects.
+func createGitRepository(f *fuzz.ConsumeFuzzer, specUrl, commit, namespaceName string) (*sourcev1.GitRepository, error) { + reference := &sourcev1.GitRepositoryRef{Branch: "some-branch"} + reference.Commit = strings.Replace(reference.Commit, "", commit, 1) + nnID, err := f.GetStringFrom(lettersAndNumbers, 10) + if err != nil { + return &sourcev1.GitRepository{}, err + } + key := types.NamespacedName{ + Name: fmt.Sprintf("git-ref-test-%s", nnID), + Namespace: namespaceName, + } + + return &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: sourcev1.GitRepositorySpec{ + URL: specUrl, + Interval: metav1.Duration{Duration: indexInterval}, + Reference: reference, + }, + }, nil +} + +// createNamespace is a helper function to create kubernetes namespaces. +func createNamespace(f *fuzz.ConsumeFuzzer) (*corev1.Namespace, func(), error) { + namespace := &corev1.Namespace{} + nnID, err := f.GetStringFrom(lettersAndNumbers, 10) + if err != nil { + return namespace, func() {}, err + } + namespace.ObjectMeta = metav1.ObjectMeta{Name: "git-repository-test" + nnID} + err = k8sClient.Create(context.Background(), namespace) + if err != nil { + return namespace, func() {}, err + } + return namespace, func() { + k8sClient.Delete(context.Background(), namespace) + }, nil +} + +// createGitServer is a helper function to create a git server. 
+func createGitServer(f *fuzz.ConsumeFuzzer) (*url.URL, func()) { + repoID, err := f.GetStringFrom(lettersAndNumbers, 10) + if err != nil { + return &url.URL{}, func() {} + } + gitServer, err := gittestserver.NewTempGitServer() + if err != nil { + panic(err) + } + gitServer.AutoCreate() + defer os.RemoveAll(gitServer.Root()) + + utilruntime.Must(gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com")) + + u, err := url.Parse(gitServer.HTTPAddress()) + if err != nil { + panic(err) + } + u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", repoID)) + return u, func() { gitServer.StopHTTP() } +} + +// pushFilesToGit is a helper function to push files to a git server. +func pushFilesToGit(gitrepo *git.Repository, wt *git.Worktree, gitServerURL string) (plumbing.Hash, error) { + commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ + Name: "John Doe", + Email: "john@example.com", + When: time.Now(), + }}) + if err != nil { + return plumbing.ZeroHash, err + } + hRef := plumbing.NewHashReference(plumbing.ReferenceName("refs/heads/some-branch"), commit) + err = gitrepo.Storer.SetReference(hRef) + if err != nil { + return plumbing.ZeroHash, err + } + + remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ + Name: "origin", + URLs: []string{gitServerURL}, + }) + if err != nil { + return plumbing.ZeroHash, err + } + err = remote.Push(&git.PushOptions{ + RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, + }) + if err != nil { + return plumbing.ZeroHash, err + } + return commit, nil + +} + +// createRandomFiles is a helper function to create files in a billy.Filesystem. 
+func createRandomFiles(f *fuzz.ConsumeFuzzer, fs billy.Filesystem, wt *git.Worktree) error { + numberOfFiles, err := f.GetInt() + if err != nil { + return err + } + maxNumberOfFiles := 4000 // This number is completely arbitrary + if numberOfFiles%maxNumberOfFiles == 0 { + return errors.New("We don't want to create 0 files...") + } + + for i := 0; i < numberOfFiles%maxNumberOfFiles; i++ { + dirPath, err := f.GetString() + if err != nil { + return err + } + + // Check for ".." cases + if strings.Contains(dirPath, "..") { + return errors.New("Dir contains '..'") + } + + err = fs.MkdirAll(dirPath, 0o700) + if err != nil { + return errors.New("Could not create the subDir") + } + fileName, err := f.GetString() + if err != nil { + return errors.New("Could not get fileName") + } + fullFilePath := fs.Join(dirPath, fileName) + + fileContents, err := f.GetBytes() + if err != nil { + return errors.New("Could not create the subDir") + } + + createdFile, err := fs.Create(fullFilePath) + if err != nil { + return errors.New("Could not create the subDir") + } + _, err = createdFile.Write(fileContents) + if err != nil { + createdFile.Close() + return errors.New("Could not create the subDir") + } + createdFile.Close() + _, err = wt.Add(fullFilePath) + if err != nil { + panic(err) + } + noOfCreatedFiles++ + } + return nil +} + +func envtestBinVersion() string { + if binVersion := os.Getenv("ENVTEST_BIN_VERSION"); binVersion != "" { + return binVersion + } + return defaultBinVersion +} + +func ensureDependencies() error { + if _, err := os.Stat("/.dockerenv"); os.IsNotExist(err) { + return nil + } + + if os.Getenv("KUBEBUILDER_ASSETS") == "" { + binVersion := envtestBinVersion() + cmd := exec.Command("/usr/bin/bash", "-c", fmt.Sprintf(`go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \ + /root/go/bin/setup-envtest use -p path %s`, binVersion)) + + cmd.Env = append(os.Environ(), "GOPATH=/root/go") + assetsPath, err := cmd.Output() + if err != nil { + return err 
+ } + os.Setenv("KUBEBUILDER_ASSETS", string(assetsPath)) + } + + // Output all embedded testdata files + embedDirs := []string{"testdata/crd", "testdata/certs"} + for _, dir := range embedDirs { + err := os.MkdirAll(dir, 0o700) + if err != nil { + return fmt.Errorf("mkdir %s: %v", dir, err) + } + + templates, err := fs.ReadDir(testFiles, dir) + if err != nil { + return fmt.Errorf("reading embedded dir: %v", err) + } + + for _, template := range templates { + fileName := fmt.Sprintf("%s/%s", dir, template.Name()) + fmt.Println(fileName) + + data, err := testFiles.ReadFile(fileName) + if err != nil { + return fmt.Errorf("reading embedded file %s: %v", fileName, err) + } + + os.WriteFile(fileName, data, 0o600) + if err != nil { + return fmt.Errorf("writing %s: %v", fileName, err) + } + } + } + + startEnvServer(func(m manager.Manager) { + utilruntime.Must((&GitRepositoryReconciler{ + Client: m.GetClient(), + Storage: storage, + }).SetupWithManagerAndOptions(m, GitRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + })) + }) + + return nil +} + +func startEnvServer(setupReconcilers func(manager.Manager)) *envtest.Environment { + testEnv := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("testdata", "crd")}, + } + fmt.Println("Starting the test environment") + cfg, err := testEnv.Start() + if err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } + + utilruntime.Must(loadExampleKeys()) + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + if err != nil { + panic(err) + } + defer os.RemoveAll(tmpStoragePath) + storage, err = intstorage.New(tmpStoragePath, "localhost:5050", time.Minute*1, 2) + if err != nil { + panic(err) + } + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + cert, err 
:= tls.X509KeyPair(examplePublicKey, examplePrivateKey) + if err != nil { + panic(err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(exampleCA) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + } + tlsConfig.BuildNameToCertificate() + + var transport = httptransport.NewClient(&http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + }) + gitclient.InstallProtocol("https", transport) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + if err != nil { + panic(err) + } + if k8sClient == nil { + panic("cfg is nil but should not be") + } + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + if err != nil { + panic(err) + } + + setupReconcilers(k8sManager) + + time.Sleep(2 * time.Second) + go func() { + fmt.Println("Starting k8sManager...") + utilruntime.Must(k8sManager.Start(context.TODO())) + }() + + return testEnv +} diff --git a/internal/controller/gitrepository_controller_test.go b/internal/controller/gitrepository_controller_test.go new file mode 100644 index 000000000..f9f7a591d --- /dev/null +++ b/internal/controller/gitrepository_controller_test.go @@ -0,0 +1,3606 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/go-git/go-billy/v5/memfs" + gogit "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage/memory" + . "github.com/onsi/gomega" + sshtestdata "golang.org/x/crypto/ssh/testdata" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/git/github" + "github.com/fluxcd/pkg/gittestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/ssh" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/features" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + 
"github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +const ( + encodedCommitFixture = `tree 35f0b28987e60d4b8dec1f707fd07fef5ad84abc +parent 8b52742dbc848eb0975e62ae00fbfa4f8108e835 +author Sanskar Jaiswal 1691045123 +0530 +committer Sanskar Jaiswal 1691068951 +0530 + +git/e2e: disable CGO while running e2e tests + +Disable CGO for Git e2e tests as it was originially required because of +our libgit2 client. Since we no longer maintain a libgit2 client, there +is no need to run the tests with CGO enabled. + +Signed-off-by: Sanskar Jaiswal +` + + malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 + +Update containerd and runc to fix CVEs + +Signed-off-by: Stefan Prodan +` + signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- + +iQIzBAABCAAdFiEEOxEY0f3iSZ5rKQ+vWYLQJ5wif/0FAmTLqnEACgkQWYLQJ5wi +f/1mYw/+LRttvfPrfYl7ASUBGYSQuDzjeold8OO1LpmwjrKPpX4ivZbXHh+lJF0F +fqudKuJfJzeQCHsMZjnfgvXHd2VvxPh1jX6h3JLuNu7d4g1DtNQsKJtsLx7JW99X +J9Bb1xj0Ghh2PkrWEB9vpw+uZz4IhFrB+DNNLRNBkon3etrS1q57q8dhQFIhLI1y +ij3rq3kFHjrNNdokIv2ujyVJtWgy2fK2ELW5v2dznpykOo7hQEKgtOIHPBzGBFT0 +dUFjB99Qy4Qgjh3vWaY4fZ3u/vhp3swmw91OlDkFeyndWjDSZhzYnb7wY+U6z35C +aU4Gzc71CquSd/nTdOEkpuolBVWV5cBkM+Nxi8jtVGBeDDFE49j27a3lQ3+qtT7/ +q4FCe5Jw3GSOJvaLBLGmYVn9fc49t/28b5tkGtCHs3ATpsJohzELEIiDP90Me7hQ +Joks3ML38T4J/zZ4/ObbVMkrCEATYe3r1Ep7+e6VmOG9iTg0JIexexddjHX26Tgu +iuVP2GD/8PceqgNW/LPX84Ub32WTKPZJg+NyliDjH5QOvmguK1dRtSb/9eyYcoSF +Fkf0HcgG5jOk0OZJv0QcqXd9PhB4oXeuXgGszo9M+fhr3nWvEooAJtIyLtVtt/u2 +rNNB7xkZ1uWx+52w9RG2gmZh+LaESwd1rNXgUFLNBebNN3jNzsA= +=73xf +-----END PGP SIGNATURE-----` + + encodedTagFixture = `object 11525516bd55152ce68848bb14680aad43f18479 +type commit +tag v0.1.0 +tagger Sanskar Jaiswal 1691132850 +0530 + +v0.1.0 +` + + malformedEncodedTagFixture = `object 11525516bd55152ce68848bb14680aad43f18479 +tagger Sanskar Jaiswal 1691132850 +0530 + +v0.1.0 +` + + 
signatureTagFixture = `-----BEGIN PGP SIGNATURE----- + +iQIzBAABCAAdFiEEOxEY0f3iSZ5rKQ+vWYLQJ5wif/0FAmTMo7IACgkQWYLQJ5wi +f/1uUQ/9F70u8LZZQ3+U2vuYQ8fyVp/AV5h5zwxK5UlkR1crB0gSpdaiIxMMQRc8 +4QQIqCXloSHherUu9SPbDe9Qmr0JL8a57XqThjUSa52IYMDVos9sYwViJit+xGyz +HDot2nQ8MAqkDaiuwAnTqOyTPA89U36lGV/X/25mYxAuED+8xFx1OfvjGkX2eMEr +peWJ8VEfdFr2OmWwFceh6iF/izIaZGttwCyNy4BIh2W0GvUtQAxzqF4IzUvwfJU/ +bgARaHKQhWqFhDNImttsqJBweWavEDDmUgNg80c3cUZKqBtAjElToP9gis/SnPH5 +zaCAH66OzyKIhn6lde7KpOzyqbOyzddTa8SKkAAHyO7onukOktV8W9toeAxlF20q +Bw0MZGzAGisF8EK1HVv8UzrW9vAwdJN/yDIHWkjaeHr2FHmeV3a2QxH9PdwbE3tI +B21TCVULJuM8oR0ZG62xzg5ba5HiZMiilNMJdrBfjk5xYGk3LQU1gB4FVYa7yTsN +YfAokYtUIG187Qb8vPr1P95TzZxKdb7r/PAKEbGPro5D2Rri8OnxO/OaXG/giWS5 +5gRGmsQjvMsbzE/2PVc9+jshtZM49xL9H3DMjAWtO6MFbOqGqdi4MBa0T4qj6sZz +AbSLuRIBpXDES86faDXLRmufc95+iA/fh7W23G6vmd+SjXnCcHc= +=o4nf +-----END PGP SIGNATURE----- +` + + armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGQmiZ0BEACwsubUFoWtp6iJDK9oUN4RhPS0bAKpcRTa7P/rTCD/MbTMYdWC +4vod3FMm4+rNF0SESxY67MGmR4M3dSyOZkCijqHm9jDVOvN847LOl5bntkm8Euxm +LkpfsBWng09+gtfwuKxOxPMY017D1jM23OGbrqznHaokerFeDp9sJf1C7Z9jVf39 +oB/MF0bMdUJuxFFBdpoI73DORlAVUI14mfDbFj7v02Spkv1hqS2LtJ/Jl4QR/Vw4 +mR71aFmGFWqLBlkUOjJ2SZGkCmF/qbUdLmVb7yZUtqtua4DVkBPTORfOMhGDbrME +Nmb6Ft5neZwU0ETsT/oc6Np+PDFSUDBxu0CbKG6bw7N2y8RfiVJTaoNLFoFGV5dA +K8OpyTxU4IEPDMpkWs7tpRxPCC02uCfyqlvdF4EURXYXTj54DDLOGQjoqB+iGtVi +y2dQ4cuNhfuIFCFTA16s41DwmB0fQuOg3yfPPo7+jUefD+iAt3CZ9Guvu5+/mGyq +KxSBBRFHc8ED/L7JLPMU6tZglaPch9P4H6Fi2swDryyZQn/a2kYanEh9v1wL94L4 +3gUdjIYP8kjfg7nnS2FX9hl5FtPeM3jvnWjfv9jR+c8HWQZY2wM3Rj5iulu70K2U +pkdRUN0p2D5+Kq6idNreNoPlpQGoUOYrtAfOwtDFgMwuOZ78XkSIbFhtgwARAQAB +tEVTYW5za2FyIEphaXN3YWwgKEdpdEh1YiBHUEcgc2lnaW5nIGtleSkgPGphaXN3 +YWxzYW5za2FyMDc4QGdtYWlsLmNvbT6JAk4EEwEIADgWIQQ7ERjR/eJJnmspD69Z +gtAnnCJ//QUCZCaJnQIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRBZgtAn +nCJ//dF4D/0Tl5Wre6KrZvjDs5loulhN8YMYb63jr+x1eVkpMpta51XvZvkZFoiY +9T4MQX+qgAkTrUJsxgWUwtVtDfmbyLXodDRS6JUbCRiMu12VD7mNT+lUfuhR2sJv 
+rHZoolQp7X4DTea1R64PcttfmlGO2pUNpGNmhojO0PahXqOCHmEUWBJQhI8RvOcs +zRjEzDcAcEgtMGzamq6DR54YxyzGE8V9b5WD/elmEXM6uWW+CkfX8WskKbLdRY0t ++GQ1pOtf3tKxD46I3LIsUEwbyh4Dv4vJbZmyxjI+FKbSCW5tMrz/ZWrPNl0m+pDI +Yn0+GWed2pgTMFh3VAhYCyIVugKynlaToH+D2z3DnuEp3Jfs+b1BdirS/PW79tW7 +rjCJzqofF2UPyK0mzdYL+P3k9Hip5J0bCGoeMdCLsP5fYq3Y1YS4bH4JkDm52y+r +y89AH4LHHQt+A7w19I+6M2jmcNnDUMrpuSo84GeoM59O3fU7hLCC1Jx4hj7EBRrb +QzY5FInrE/WTcgFRljK46zhW4ybmfak/xJV654UqJCDWlVbc68D8JrKNQOj7gdPs +zh1+m2pFDEhWZkaFtQbSEpXMIJ9DsCoyQL4Knl+89VxHsrIyAJsmGb3V8xvtv5w9 +QuWtsDnYbvDHtTpu1NZChVrnr/l1k3C2fcLhV1s583AvhGMkbgSXkQ== +=Tdjz +-----END PGP PUBLIC KEY BLOCK----- +` +) + +func TestGitRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "gitrepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + gitRepo := &sourcev1.GitRepository{} + gitRepo.Name = "test-gitrepo" + gitRepo.Namespace = namespaceName + gitRepo.Spec = sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "https://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + gitRepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, gitRepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, gitRepo)).NotTo(HaveOccurred()) + + r := &GitRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(gitRepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestGitRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + origObj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "gitrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for GitRepository to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: gitRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for GitRepository to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. + obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestGitRepositoryReconciler_reconcileSource_emptyRepository(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "empty-", + Generation: 1, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + URL: server.HTTPAddress() + "/test.git", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var commit git.Commit + var includes artifactSet + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, &commit, &includes, t.TempDir()) + assertConditions := []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "EmptyGitRepository", "git repository is empty"), + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(assertConditions)) + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(commit).ToNot(BeNil()) +} + +func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := []struct { + name string + protocol string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + secretFunc func(secret *corev1.Secret, baseURL string) + middlewareFunc gittestserver.HTTPMiddleware + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes Reconciling=True", + protocol: "http", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTP with Basic Auth secret makes 
Reconciling=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "mtls-certs"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS and invalid private key makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + 
"tls.crt": clientPublicKey, + "tls.key": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-mtls-certs"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "tls: failed to find any PEM data in key input"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "HTTPS with CAFile secret makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with CAFile secret with both ca.crt and caFile keys makes Reconciling=True and ignores caFile", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef 
= &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // The expected error messages may differ when in darwin. In some cases it will match the + // error message expected in linux: "x509: certificate signed by unknown authority". In + // other cases it may get "x509: “example.com” certificate is not standards compliant" instead. + // + // Trimming the expected error message for consistent results. 
+ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "mTLS GitHub App without ca.crt makes FetchFailed=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-no-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-no-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo") + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // should record a FetchFailedCondition due to TLS handshake + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo"), + }, + }, + { + name: "mTLS GitHub App with ca.crt makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + username: github.AccessTokenUsername, + password: "some-enterprise-token", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), 
+ github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-ca"} + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + secret.Data["ca.crt"] = tlsCA + }, + middlewareFunc: func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/api/v3/app/installations/") { + w.WriteHeader(http.StatusOK) + tok := &github.AppToken{ + Token: "some-enterprise-token", + ExpiresAt: time.Now().Add(time.Hour), + } + _ = json.NewEncoder(w).Encode(tok) + } + handler.ServeHTTP(w, r) + }) + }, + wantErr: false, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + // TODO: Add test case for HTTPS with bearer token auth secret. It + // depends on gitkit to have support for bearer token based + // authentication. 
+ { + name: "SSH with private key secret makes Reconciling=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:"), + }, + }, + { + name: "SSH with password protected private key secret makes Reconciling=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes, + "password": []byte("password"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "Include get failure makes CheckoutFailed=True and returns error", + protocol: "http", + server: options{ + username: "git", + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + conditions.MarkReconciling(obj, 
meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + obj.Status = sourcev1.GitRepositoryStatus{ + Artifact: &meta.Artifact{ + Revision: "staging/some-revision", + Path: randStringRunes(10), + }, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master@sha1:'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. 
+ name: "github provider without secret ref makes FetchFailed=True", + protocol: "http", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. + name: "empty provider with github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-app-secret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("1111"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-app-secret"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef '/github-app-secret' has github app data but provider is not set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when 
using + // provider auth. Protocol http is used for simplicity. + name: "github provider without github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-basic-auth"} + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + + if tt.middlewareFunc != nil { + server.AddHTTPMiddlewares(tt.middlewareFunc) + } + + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + if len(tt.server.username+tt.server.password) > 0 { + server.Auth(tt.server.username, 
tt.server.password) + } + + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + obj.Spec.URL = server.HTTPAddress() + repoPath + case "https": + g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + obj.Spec.URL = server.HTTPAddress() + repoPath + case "ssh": + server.KeyDir(filepath.Join(server.Root(), "keys")) + + g.Expect(server.ListenSSH()).To(Succeed()) + obj.Spec.URL = server.SSHAddress() + repoPath + + go func() { + server.StartSSH() + }() + defer server.StopSSH() + + if secret != nil && len(secret.Data["known_hosts"]) == 0 { + u, err := url.Parse(obj.Spec.URL) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(u.Host).ToNot(BeEmpty()) + knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false) + g.Expect(err).NotTo(HaveOccurred()) + secret.Data["known_hosts"] = knownHosts + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.secretFunc != nil { + tt.secretFunc(secret, server.HTTPAddress()) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + if secret != nil { + clientBuilder.WithObjects(secret.DeepCopy()) + } + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + tmpDir := t.TempDir() + + head, _ := localRepo.Head() + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", head.Hash().String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var commit git.Commit + var includes artifactSet + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, &commit, &includes, tmpDir) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(commit).ToNot(BeNil()) + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { + tests := []struct { + name string + url string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + wantErr string + }{ + { + name: "azure provider", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + }, + wantErr: "ManagedIdentityCredential", + }, + { + name: "azure provider with service account and feature gate for object-level identity disabled", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + obj.Spec.ServiceAccountName = "azure-sa" + }, + wantErr: auth.FeatureGateObjectLevelWorkloadIdentity, + }, + { + name: "github provider with no secret ref", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "github provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: []byte("abc"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "Key must be a PEM encoded PKCS1 or PKCS8 key", + }, + { + name: "generic provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "secretRef '/githubAppSecret' has github app data but provider is not set to github", + }, + { + name: "github provider with basic auth secret", + url: "https://github.com/org/repo.git", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth-secret", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "basic-auth-secret", + } + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "generic provider", + url: "https://example.com/org/repo", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + }, + }, + { + name: "secret ref defined for non existing secret", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "authSecret", + } + }, + wantErr: "failed to get secret '/authSecret': secrets \"authSecret\" not found", + }, + { + url: "https://example.com/org/repo", + name: "no provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + obj := &sourcev1.GitRepository{} + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + url, err := url.Parse(tt.url) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + opts, err := r.getAuthOpts(ctx, obj, *url, nil) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(opts).ToNot(BeNil()) + g.Expect(opts.BearerToken).To(BeEmpty()) + g.Expect(opts.Username).To(BeEmpty()) + g.Expect(opts.Password).To(BeEmpty()) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) { + g := NewWithT(t) + + branches := []string{"staging"} + tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"} + refs := []string{"refs/pull/420/head"} + + tests := []struct { + name string + reference *sourcev1.GitRepositoryRef + beforeFunc func(obj *sourcev1.GitRepository, latestRev string) + want sreconcile.Result + wantErr bool + wantRevision string + wantArtifactOutdated bool + wantReconciling bool + }{ + { + name: "Nil reference (default branch)", + want: sreconcile.ResultSuccess, + wantRevision: "master@sha1:", + wantReconciling: true, + }, + { + name: "Branch", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging@sha1:", + wantReconciling: true, + }, + { + name: "Tag", + reference: &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + }, + want: sreconcile.ResultSuccess, + wantRevision: 
"v0.1.0@sha1:", + wantReconciling: true, + }, + { + name: "Branch commit", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + Commit: "", + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging@sha1:", + wantReconciling: true, + }, + { + name: "Ref Name pointing to a branch", + reference: &sourcev1.GitRepositoryRef{ + Name: "refs/heads/staging", + }, + want: sreconcile.ResultSuccess, + wantRevision: "refs/heads/staging@sha1:", + wantReconciling: true, + }, + { + name: "Ref Name pointing to a PR", + reference: &sourcev1.GitRepositoryRef{ + Name: "refs/pull/420/head", + }, + want: sreconcile.ResultSuccess, + wantRevision: "refs/pull/420/head@sha1:", + wantReconciling: true, + }, + { + name: "SemVer", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "*", + }, + want: sreconcile.ResultSuccess, + wantRevision: "v2.0.0@sha1:", + wantReconciling: true, + }, + { + name: "SemVer range", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "", + wantReconciling: true, + }, + { + name: "SemVer prerelease", + reference: &sourcev1.GitRepositoryRef{ + SemVer: ">=1.0.0-0 <1.1.0-0", + }, + wantRevision: "v1.0.0-alpha@sha1:", + want: sreconcile.ResultSuccess, + wantReconciling: true, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { + obj.Status = sourcev1.GitRepositoryStatus{ + Artifact: &meta.Artifact{ + Revision: "staging/some-revision", + Path: randStringRunes(10), + }, + } + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo") + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging@sha1:", + wantArtifactOutdated: true, + wantReconciling: true, + }, + { + name: "Optimized clone", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + beforeFunc: func(obj 
*sourcev1.GitRepository, latestRev string) { + // Add existing artifact on the object and storage. + obj.Status = sourcev1.GitRepositoryStatus{ + Artifact: &meta.Artifact{ + Revision: "staging@sha1:" + latestRev, + Path: randStringRunes(10), + }, + } + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantRevision: "staging@sha1:", + wantReconciling: false, + }, + { + name: "Optimized clone different ignore", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { + // Set new ignore value. + obj.Spec.Ignore = ptr.To("foo") + // Add existing artifact on the object and storage. + obj.Status = sourcev1.GitRepositoryStatus{ + Artifact: &meta.Artifact{ + Revision: "staging@sha1:" + latestRev, + Path: randStringRunes(10), + }, + } + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo") + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging@sha1:", + wantReconciling: false, + }, + } + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).To(BeNil()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + headRef, err := localRepo.Head() + g.Expect(err).NotTo(HaveOccurred()) + + for _, branch := range branches { + g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed()) + } + for _, tag := range tags { + g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed()) + } + + for _, ref := range refs { + g.Expect(remoteRefForHead(localRepo, 
headRef, ref)).To(Succeed()) + } + + r := &GitRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.GitRepository{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "checkout-strategy-", + Generation: 1, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + URL: server.HTTPAddress() + repoPath, + Reference: tt.reference, + }, + } + + if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "" { + obj.Spec.Reference.Commit = headRef.Hash().String() + } + + tmpDir := t.TempDir() + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, headRef.Hash().String()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var commit git.Commit + var includes artifactSet + sp := patch.NewSerialPatcher(obj, r.Client) + got, err := r.reconcileSource(ctx, sp, obj, &commit, &includes, tmpDir) + if err != nil && !tt.wantErr { + t.Log(err) + } + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + if tt.wantRevision != "" && !tt.wantErr { + revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String()) + g.Expect(commitReference(obj, &commit)).To(Equal(revision)) + g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(Equal(tt.wantArtifactOutdated)) + g.Expect(conditions.IsTrue(obj, meta.ReconcilingCondition)).To(Equal(tt.wantReconciling)) + } + // In-progress status condition validity. 
			checker := conditionscheck.NewInProgressChecker(r.Client)
			checker.WithT(g).CheckErr(ctx, obj)
		})
	}
}

// TestGitRepositoryReconciler_reconcileArtifact exercises artifact
// archiving against the test storage: success paths (with and without
// includes, with ignore rules) assert pinned content digests, and the
// failure paths assert the storage-operation failure conditions.
func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
	tests := []struct {
		name             string
		dir              string
		includes         artifactSet
		beforeFunc       func(obj *sourcev1.GitRepository)
		afterFunc        func(t *WithT, obj *sourcev1.GitRepository)
		want             sreconcile.Result
		wantErr          bool
		assertConditions []metav1.Condition
	}{
		{
			name: "Archiving artifact to storage makes ArtifactInStorage=True",
			dir:  "testdata/git/repository",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
			},
			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
				t.Expect(obj.GetArtifact()).ToNot(BeNil())
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			name: "Archiving artifact to storage with includes makes ArtifactInStorage=True",
			dir:  "testdata/git/repository",
			// The include's artifact revision feeds into the stored digest.
			includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Include = []sourcev1.GitRepositoryInclude{
					{GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}},
				}
			},
			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
				t.Expect(obj.GetArtifact()).ToNot(BeNil())
				t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:34d9af1a2fcfaef3ee9487d67dc2d642bc7babdb9444a5f60d1f32df32e4de7d"))
				t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty())
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			// Status already matches the incoming revision and includes, so
			// reconcileArtifact must be a no-op on status.
			name:     "Up-to-date artifact should not update status",
			dir:      "testdata/git/repository",
			includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Include = []sourcev1.GitRepositoryInclude{
					{GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}},
				}
				obj.Status.Artifact = &meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}
				obj.Status.IncludedArtifacts = []*meta.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}
				obj.Status.ObservedInclude = obj.Spec.Include
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			// A different ignore pattern must change the archived content,
			// which is asserted via the pinned digest.
			name: "Spec ignore overwrite is taken into account",
			dir:  "testdata/git/repository",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Ignore = ptr.To("!**.txt\n")
			},
			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
				t.Expect(obj.GetArtifact()).ToNot(BeNil())
				t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:a17037f96f541a47bdadcd12ab40b943c50a9ffd25dc8a30a5e9af52971fd94f"))
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			// Per-subdirectory ignore files in the fixture must be honored.
			name: "Source ignore for subdir ignore patterns",
			dir:  "testdata/git/repowithsubdirs",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
			},
			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
				t.Expect(obj.GetArtifact()).ToNot(BeNil())
				t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:ad9943d761b30e943e2a770ea9083a40fc03f09846efd61f6c442cc48fefad11"))
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			name: "Removes ArtifactOutdatedCondition after creating new artifact",
			dir:  "testdata/git/repository",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
			},
			afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
				t.Expect(obj.GetArtifact()).ToNot(BeNil())
				t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:34d9af1a2fcfaef3ee9487d67dc2d642bc7babdb9444a5f60d1f32df32e4de7d"))
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
			},
		},
		{
			name:    "Target path does not exists",
			dir:     "testdata/git/foo",
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat target artifact path"),
			},
		},
		{
			name:    "Target path is not a directory",
			dir:     "testdata/git/repository/foo.txt",
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "invalid target path"),
			},
		},
	}
	// artifactSize fetches the artifact over HTTP and reports its
	// Content-Length, used to cross-check the recorded artifact size.
	artifactSize := func(g *WithT, artifactURL string) *int64 {
		if artifactURL == "" {
			return nil
		}
		res, err := http.Get(artifactURL)
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(res.StatusCode).To(Equal(http.StatusOK))
		defer res.Body.Close()
		return &res.ContentLength
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Reset fixture permissions a previous test may have altered.
			resetChmod(tt.dir, 0o750, 0o600)

			r := &GitRepositoryReconciler{
				EventRecorder: record.NewFakeRecorder(32),
				Storage:       testStorage,
				features:      features.FeatureGates(),
				patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}

			obj := &sourcev1.GitRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "reconcile-artifact-",
					Generation:   1,
				},
				Status: sourcev1.GitRepositoryStatus{},
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			// Fixed commit matching the revision strings asserted above.
			commit := git.Commit{
				Hash:      []byte("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"),
				Reference: "refs/heads/main",
			}
			sp := patch.NewSerialPatcher(obj, r.Client)

			got, err := r.reconcileArtifact(ctx, sp, obj, &commit, &tt.includes, tt.dir)
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
			g.Expect(err != nil).To(Equal(tt.wantErr))
			g.Expect(got).To(Equal(tt.want))

			// The recorded size must match what the storage actually serves.
			if obj.Status.Artifact != nil {
				g.Expect(obj.Status.Artifact.Size).To(Equal(artifactSize(g, obj.Status.Artifact.URL)))
			}

			if tt.afterFunc != nil {
				tt.afterFunc(g, obj)
			}
		})
	}
}

// TestGitRepositoryReconciler_reconcileInclude verifies that artifacts of
// dependency GitRepositories are fetched and unpacked into the configured
// to-paths, and that an invalid from-path fails with a copy error.
func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
	g := NewWithT(t)

	// Dedicated artifact server/storage so include archives can be served.
	server, err := testserver.NewTempArtifactServer()
	g.Expect(err).NotTo(HaveOccurred())
	server.Start()
	defer server.Stop()
	storage, err := newTestStorage(server.HTTPServer)
	g.Expect(err).NotTo(HaveOccurred())
	defer os.RemoveAll(storage.BasePath)

	dependencyInterval := 5 * time.Second

	// dependency describes a GitRepository this object includes from.
	type dependency struct {
		name         string
		withArtifact bool
		conditions   []metav1.Condition
	}

	// include describes one spec.include entry and the expected outcome.
	type include struct {
		name        string
		fromPath    string
		toPath      string
		shouldExist bool
	}

	tests := []struct {
		name             string
		dependencies     []dependency
		includes         []include
		beforeFunc       func(obj *sourcev1.GitRepository)
		want             sreconcile.Result
		wantErr          bool
		assertConditions []metav1.Condition
	}{
		{
			name: "New includes make ArtifactOutdated=True",
			dependencies: []dependency{
				{
					name:         "a",
					withArtifact: true,
					conditions: []metav1.Condition{
						*conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"),
					},
				},
				{
					name:         "b",
					withArtifact: true,
					conditions: []metav1.Condition{
						*conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"),
					},
				},
			},
			includes: []include{
				{name: "a", toPath: "a/", shouldExist: true},
				{name: "b", toPath: "b/", shouldExist: true},
			},
			want: sreconcile.ResultSuccess,
		},
		{
			name: "Invalid FromPath makes IncludeUnavailable=True and returns error",
			dependencies: []dependency{
				{
					name:         "a",
					withArtifact: true,
				},
			},
			includes: []include{
				// Path traversal outside the artifact must fail the copy.
				{name: "a", fromPath: "../../../path", shouldExist: false},
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Create the dependency objects and, where requested, archive a
			// real artifact for them into the test storage.
			var depObjs []client.Object
			for _, d := range tt.dependencies {
				obj := &sourcev1.GitRepository{
					ObjectMeta: metav1.ObjectMeta{
						Name: d.name,
					},
					Status: sourcev1.GitRepositoryStatus{
						Conditions: d.conditions,
					},
				}
				if d.withArtifact {
					obj.Status.Artifact = &meta.Artifact{
						Path:           d.name + ".tar.gz",
						Revision:       d.name,
						LastUpdateTime: metav1.Now(),
					}
					g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed())
				}
				depObjs = append(depObjs, obj)
			}

			clientBuilder := fakeclient.NewClientBuilder().
				WithScheme(testEnv.GetScheme()).
				WithStatusSubresource(&sourcev1.GitRepository{})

			if len(tt.dependencies) > 0 {
				clientBuilder.WithObjects(depObjs...)
			}

			r := &GitRepositoryReconciler{
				Client:            clientBuilder.Build(),
				EventRecorder:     record.NewFakeRecorder(32),
				Storage:           storage,
				requeueDependency: dependencyInterval,
				features:          features.FeatureGates(),
				patchOptions:      getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}

			obj := &sourcev1.GitRepository{
				ObjectMeta: metav1.ObjectMeta{
					Name: "reconcile-include",
				},
				Spec: sourcev1.GitRepositorySpec{
					Interval: metav1.Duration{Duration: interval},
				},
			}

			// Mirror the include specs onto the object, and normalize the
			// expected paths via the API defaulting helpers.
			for i, incl := range tt.includes {
				incl := sourcev1.GitRepositoryInclude{
					GitRepositoryRef: meta.LocalObjectReference{Name: incl.name},
					FromPath:         incl.fromPath,
					ToPath:           incl.toPath,
				}
				tt.includes[i].fromPath = incl.GetFromPath()
				tt.includes[i].toPath = incl.GetToPath()
				obj.Spec.Include = append(obj.Spec.Include, incl)
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			tmpDir := t.TempDir()

			var commit git.Commit
			var includes artifactSet

			// Build includes artifactSet.
			artifactSet, err := r.fetchIncludes(ctx, obj)
			g.Expect(err).ToNot(HaveOccurred())
			includes = *artifactSet

			sp := patch.NewSerialPatcher(obj, r.Client)

			got, err := r.reconcileInclude(ctx, sp, obj, &commit, &includes, tmpDir)
			g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
			g.Expect(err != nil).To(Equal(tt.wantErr))
			if err == nil {
				g.Expect(len(includes)).To(Equal(len(tt.includes)))
			}
			g.Expect(got).To(Equal(tt.want))
			// Verify each include landed (or not) at its to-path. The second
			// check intentionally repeats the assertion without the toPath
			// guard, covering the defaulted-path case as well.
			for _, i := range tt.includes {
				if i.toPath != "" {
					expect := g.Expect(filepath.Join(tmpDir, i.toPath))
					if i.shouldExist {
						expect.To(BeADirectory())
					} else {
						expect.NotTo(BeADirectory())
					}
				}
				if i.shouldExist {
					g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory())
				} else {
					g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory())
				}
			}
		})
	}
}

// TestGitRepositoryReconciler_reconcileStorage verifies garbage collection
// of old artifacts, detection of missing/corrupted artifacts in storage,
// and rewriting of artifact URLs when the storage hostname changes.
func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
	tests := []struct {
		name             string
		beforeFunc       func(obj *sourcev1.GitRepository, storage *storage.Storage) error
		want             sreconcile.Result
		wantErr          bool
		assertArtifact   *meta.Artifact
		assertConditions []metav1.Condition
		assertPaths      []string
	}{
		{
			name: "garbage collects",
			beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error {
				// Write four artifact revisions; only the most recent two
				// are expected to survive garbage collection.
				revisions := []string{"a", "b", "c", "d"}
				for n := range revisions {
					v := revisions[n]
					obj.Status.Artifact = &meta.Artifact{
						Path:     fmt.Sprintf("/reconcile-storage/%s.txt", v),
						Revision: v,
					}
					if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
						return err
					}
					if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
						return err
					}
					// Space out mtimes so GC ordering is deterministic.
					if n != len(revisions)-1 {
						time.Sleep(time.Second * 1)
					}
				}
				storage.SetArtifactURL(obj.Status.Artifact)
				conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar")
				return nil
			},
			assertArtifact: &meta.Artifact{
				Path:     "/reconcile-storage/d.txt",
				Revision: "d",
				Digest:   "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
				URL:      testStorage.Hostname + "/reconcile-storage/d.txt",
				Size:     int64p(int64(len("d"))),
			},
			// Paths prefixed with "!" must NOT exist after reconciliation.
			assertPaths: []string{
				"/reconcile-storage/d.txt",
				"/reconcile-storage/c.txt",
				"!/reconcile-storage/b.txt",
				"!/reconcile-storage/a.txt",
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"),
			},
		},
		{
			name: "build artifact first time",
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"),
			},
		},
		{
			name: "notices missing artifact in storage",
			beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error {
				// Status references a file that was never written.
				obj.Status.Artifact = &meta.Artifact{
					Path:     "/reconcile-storage/invalid.txt",
					Revision: "e",
				}
				storage.SetArtifactURL(obj.Status.Artifact)
				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/reconcile-storage/invalid.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "notices empty artifact digest",
			beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error {
				f := "empty-digest.txt"

				obj.Status.Artifact = &meta.Artifact{
					Path:     fmt.Sprintf("/reconcile-storage/%s.txt", f),
					Revision: "fake",
				}

				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil {
					return err
				}

				// Overwrite with a different digest
				obj.Status.Artifact.Digest = ""

				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/reconcile-storage/empty-digest.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "notices artifact digest mismatch",
			beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error {
				f := "digest-mismatch.txt"

				obj.Status.Artifact = &meta.Artifact{
					Path:     fmt.Sprintf("/reconcile-storage/%s.txt", f),
					Revision: "fake",
				}

				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil {
					return err
				}

				// Overwrite with a different digest
				obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023"

				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/reconcile-storage/digest-mismatch.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "updates hostname on diff from current",
			beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error {
				// URL recorded under an outdated hostname must be rewritten
				// to the current storage hostname.
				obj.Status.Artifact = &meta.Artifact{
					Path:     "/reconcile-storage/hostname.txt",
					Revision: "f",
					Digest:   "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
					URL:      "http://outdated.com/reconcile-storage/hostname.txt",
				}
				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
					return err
				}
				conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar")
				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"/reconcile-storage/hostname.txt",
			},
			assertArtifact: &meta.Artifact{
				Path:     "/reconcile-storage/hostname.txt",
				Revision: "f",
				Digest:   "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
				URL:      testStorage.Hostname + "/reconcile-storage/hostname.txt",
				Size:     int64p(int64(len("file"))),
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Shared testStorage: clean up this test's subtree afterwards.
			defer func() {
				g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed())
			}()

			r := &GitRepositoryReconciler{
				Client: fakeclient.NewClientBuilder().
					WithScheme(testEnv.GetScheme()).
					WithStatusSubresource(&sourcev1.GitRepository{}).
					Build(),
				EventRecorder: record.NewFakeRecorder(32),
				Storage:       testStorage,
				features:      features.FeatureGates(),
				patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}

			obj := &sourcev1.GitRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "test-",
					Generation:   1,
				},
			}
			if tt.beforeFunc != nil {
				g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
			}

			g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred())
			}()

			var c *git.Commit
			var as artifactSet
			sp := patch.NewSerialPatcher(obj, r.Client)
			got, err := r.reconcileStorage(context.TODO(), sp, obj, c, &as, "")
			g.Expect(err != nil).To(Equal(tt.wantErr))
			g.Expect(got).To(Equal(tt.want))

			g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
			if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
				g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
			}
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))

			// A "!" prefix marks a path that must have been removed.
			for _, p := range tt.assertPaths {
				absoluteP := filepath.Join(testStorage.BasePath, p)
				if !strings.HasPrefix(p, "!") {
					g.Expect(absoluteP).To(BeAnExistingFile())
					continue
				}
				g.Expect(absoluteP).NotTo(BeAnExistingFile())
			}

			// In-progress status condition validity.
			checker := conditionscheck.NewInProgressChecker(r.Client)
			checker.WithT(g).CheckErr(ctx, obj)
		})
	}
}

// TestGitRepositoryReconciler_reconcileDelete verifies that reconcileDelete
// removes the SourceFinalizer and clears the artifact from status. It does
// not assert on storage garbage collection directly, only on the object's
// finalizer and status fields.
func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) {
	g := NewWithT(t)

	r := &GitRepositoryReconciler{
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		features:      features.FeatureGates(),
		patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
	}

	// Object is already marked for deletion and carries the finalizer the
	// reconciler is expected to remove.
	obj := &sourcev1.GitRepository{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "reconcile-delete-",
			DeletionTimestamp: &metav1.Time{Time: time.Now()},
			Finalizers: []string{
				sourcev1.SourceFinalizer,
			},
		},
		Status: sourcev1.GitRepositoryStatus{},
	}

	artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt")
	obj.Status.Artifact = &artifact

	got, err := r.reconcileDelete(ctx, obj)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(got).To(Equal(sreconcile.ResultEmpty))
	// Finalizer must be gone and the artifact reference cleared.
	g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse())
	g.Expect(obj.Status.Artifact).To(BeNil())
}

// TestGitRepositoryReconciler_verifySignature is a table-driven test covering
// PGP signature verification of commits and tags for every verification mode
// (HEAD, Tag, TagAndHEAD), including failure cases such as unsigned or
// malformed objects, invalid key rings, and missing secrets. Assertions are
// pinned to the exact condition reasons and messages the reconciler emits.
func TestGitRepositoryReconciler_verifySignature(t *testing.T) {
	tests := []struct {
		name                       string
		secret                     *corev1.Secret
		commit                     git.Commit
		beforeFunc                 func(obj *sourcev1.GitRepository)
		want                       sreconcile.Result
		wantErr                    bool
		err                        error
		wantSourceVerificationMode *sourcev1.GitVerificationMode
		assertConditions           []metav1.Condition
	}{
		{
			name: "Valid commit with mode=HEAD makes SourceVerifiedCondition=True",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				Hash:      []byte("shasum"),
				Encoded:   []byte(encodedCommitFixture),
				Signature: signatureCommitFixture,
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			want:                       sreconcile.ResultSuccess,
			wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- commit 'shasum' with key '5982D0279C227FFD'"),
			},
		},
		{
			// Lowercase "head" must be accepted as an alias of ModeGitHEAD.
			name: "Valid commit with mode=head makes SourceVerifiedCondition=True",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				Hash:      []byte("shasum"),
				Encoded:   []byte(encodedCommitFixture),
				Signature: signatureCommitFixture,
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: "head",
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			want:                       sreconcile.ResultSuccess,
			wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- commit 'shasum' with key '5982D0279C227FFD'"),
			},
		},
		{
			name: "Valid tag with mode=tag makes SourceVerifiedCondition=True",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				ReferencingTag: &git.Tag{
					Name:      "v0.1.0",
					Hash:      []byte("shasum"),
					Encoded:   []byte(encodedTagFixture),
					Signature: signatureTagFixture,
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Tag: "v0.1.0",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTag,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			want:                       sreconcile.ResultSuccess,
			wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag),
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- tag 'v0.1.0@shasum' with key '5982D0279C227FFD'"),
			},
		},
		{
			name: "Valid tag and commit with mode=TagAndHEAD makes SourceVerifiedCondition=True",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				Hash:      []byte("shasum"),
				Encoded:   []byte(encodedCommitFixture),
				Signature: signatureCommitFixture,
				ReferencingTag: &git.Tag{
					Name:      "v0.1.0",
					Hash:      []byte("shasum"),
					Encoded:   []byte(encodedTagFixture),
					Signature: signatureTagFixture,
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Tag: "v0.1.0",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTagAndHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			want:                       sreconcile.ResultSuccess,
			wantSourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD),
			assertConditions: []metav1.Condition{
				// Both the tag and the commit appear in the success message.
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of\n\t- tag 'v0.1.0@shasum' with key '5982D0279C227FFD'\n\t- commit 'shasum' with key '5982D0279C227FFD'"),
			},
		},
		{
			name: "Source verification mode in status is unset if there's no verification in spec",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.SourceVerificationMode = ptrToVerificationMode(sourcev1.ModeGitHEAD)
				obj.Spec.Verification = nil
			},
			want: sreconcile.ResultSuccess,
		},
		{
			name: "Verification of tag with no tag ref SourceVerifiedCondition=False and returns a stalling error",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				// Branch (not tag) reference plus tag verification mode is an
				// invalid combination and must stall the object.
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Branch: "main",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTag,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			wantErr: true,
			err: serror.NewStalling(
				errors.New("cannot verify tag object's signature if a tag reference is not specified"),
				"InvalidVerificationMode",
			),
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidVerificationMode", "cannot verify tag object's signature if a tag reference is not specified"),
			},
		},
		{
			name: "Unsigned tag with mode=tag makes SourceVerifiedCondition=False",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				// Tag deliberately has no Signature.
				ReferencingTag: &git.Tag{
					Name:    "v0.1.0",
					Hash:    []byte("shasum"),
					Encoded: []byte(encodedTagFixture),
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Tag: "v0.1.0",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTag,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidGitObject", "cannot verify signature of tag 'v0.1.0@shasum' since it is not signed"),
			},
		},
		{
			name:
"Partially successful verification makes SourceVerifiedCondition=False",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				// Valid tag signature, but malformed commit payload: the
				// TagAndHEAD mode must fail on the commit half.
				Hash:      []byte("shasum"),
				Encoded:   []byte(malformedEncodedCommitFixture),
				Signature: signatureCommitFixture,
				ReferencingTag: &git.Tag{
					Name:      "v0.1.0",
					Hash:      []byte("shasum"),
					Encoded:   []byte(encodedTagFixture),
					Signature: signatureTagFixture,
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Tag: "v0.1.0",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTagAndHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable to verify Git commit: unable to verify payload with any of the given key rings"),
			},
		},
		{
			name: "Invalid commit makes SourceVerifiedCondition=False and returns error",
			// NOTE: this secret intentionally carries no key data.
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
			},
			commit: git.Commit{
				Hash:      []byte("shasum"),
				Encoded:   []byte(malformedEncodedCommitFixture),
				Signature: signatureCommitFixture,
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable to verify Git commit: unable to verify payload with any of the given key rings"),
			},
		},
		{
			name: "Invalid tag signature with mode=tag makes SourceVerifiedCondition=False",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "existing",
				},
				Data: map[string][]byte{
					"foo": []byte(armoredKeyRingFixture),
				},
			},
			commit: git.Commit{
				ReferencingTag: &git.Tag{
					Name:      "v0.1.0",
					Hash:      []byte("shasum"),
					Encoded:   []byte(malformedEncodedTagFixture),
					Signature: signatureTagFixture,
				},
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Reference = &sourcev1.GitRepositoryRef{
					Tag: "v0.1.0",
				}
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitTag,
					SecretRef: meta.LocalObjectReference{
						Name: "existing",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidTagSignature", "signature verification of tag 'v0.1.0@shasum' failed: unable to verify Git tag: unable to verify payload with any of the given key rings"),
			},
		},
		{
			name: "Invalid PGP key makes SourceVerifiedCondition=False and returns error",
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "invalid",
				},
				Data: map[string][]byte{
					"foo": []byte("invalid PGP public key"),
				},
			},
			commit: git.Commit{
				Hash:      []byte("shasum"),
				Encoded:   []byte(malformedEncodedCommitFixture),
				Signature: signatureCommitFixture,
			},
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "invalid",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: unable to verify Git commit: unable to read armored key ring: openpgp: invalid argument: no armored data found"),
			},
		},
		{
			name: "Secret get failure makes SourceVerifiedCondition=False and returns error",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
					Mode: sourcev1.ModeGitHEAD,
					SecretRef: meta.LocalObjectReference{
						Name: "none-existing",
					},
				}
			},
			wantErr: true,
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerificationError", "PGP public keys secret error: secrets \"none-existing\" not found"),
			},
		},
		{
			name: "Nil verification in spec deletes SourceVerified condition",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
			},
			want:             sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{},
		},
		{
			name: "Empty verification mode in spec deletes SourceVerified condition",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Spec.Interval = metav1.Duration{Duration: interval}
				obj.Spec.Verification = &sourcev1.GitRepositoryVerification{}
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
			},
			want:             sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			clientBuilder := fakeclient.NewClientBuilder().
				WithScheme(testEnv.GetScheme()).
				WithStatusSubresource(&sourcev1.GitRepository{})

			// Only seed the fake client with a secret when the case has one.
			if tt.secret != nil {
				clientBuilder.WithObjects(tt.secret)
			}

			r := &GitRepositoryReconciler{
				EventRecorder: record.NewFakeRecorder(32),
				Client:        clientBuilder.Build(),
				features:      features.FeatureGates(),
				patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}

			obj := &sourcev1.GitRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "verify-commit-",
					Generation:   1,
				},
				Status: sourcev1.GitRepositoryStatus{},
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			got, err := r.verifySignature(context.TODO(), obj, tt.commit)
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
			g.Expect(err != nil).To(Equal(tt.wantErr))
			if tt.err != nil {
				g.Expect(err).To(Equal(tt.err))
			}
			g.Expect(got).To(Equal(tt.want))
			if tt.wantSourceVerificationMode != nil {
				g.Expect(*obj.Status.SourceVerificationMode).To(Equal(*tt.wantSourceVerificationMode))
			} else {
				g.Expect(obj.Status.SourceVerificationMode).To(BeNil())
			}
		})
	}
}

// TestGitRepositoryReconciler_ConditionsUpdate runs full Reconcile cycles
// against a local Git test server and asserts that pre-existing (stale)
// Reconciling/Stalled/failure conditions are cleared once reconciliation
// succeeds, leaving only Ready and ArtifactInStorage.
func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) {
	g := NewWithT(t)

	server, err := gittestserver.NewTempGitServer()
	g.Expect(err).NotTo(HaveOccurred())
	defer os.RemoveAll(server.Root())
	server.AutoCreate()
	g.Expect(server.StartHTTP()).To(Succeed())
	defer server.StopHTTP()

	repoPath := "/test.git"
	_, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
	g.Expect(err).NotTo(HaveOccurred())

	tests := []struct {
		name             string
		beforeFunc       func(obj *sourcev1.GitRepository)
		want             ctrl.Result
		wantErr          bool
		assertConditions []metav1.Condition
	}{
		{
			name: "no failure condition",
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded",
"stored artifact for revision"),
			},
		},
		{
			name: "reconciling condition",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "")
			},
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
			},
		},
		{
			name: "stalled condition",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "")
			},
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
			},
		},
		{
			name: "mixed failed conditions",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
				conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "")
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
			},
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
			},
		},
		{
			name: "reconciling and failed conditions",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "")
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
			},
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
			},
		},
		{
			name: "stalled and failed conditions",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "")
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
			},
			want: ctrl.Result{RequeueAfter: interval},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.GitRepository{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "condition-update",
					Namespace:  "default",
					Finalizers: []string{sourcev1.SourceFinalizer},
				},
				Spec: sourcev1.GitRepositorySpec{
					URL:      server.HTTPAddress() + repoPath,
					Interval: metav1.Duration{Duration: interval},
					Timeout:  &metav1.Duration{Duration: timeout},
				},
			}

			// Seed the stale conditions before the object is stored.
			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			clientBuilder := fakeclient.NewClientBuilder().
				WithScheme(testEnv.GetScheme()).
				WithObjects(obj).
				WithStatusSubresource(&sourcev1.GitRepository{})

			r := &GitRepositoryReconciler{
				Client:        clientBuilder.Build(),
				EventRecorder: record.NewFakeRecorder(32),
				Storage:       testStorage,
				features:      features.FeatureGates(),
				patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}

			key := client.ObjectKeyFromObject(obj)
			res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key})
			g.Expect(err != nil).To(Equal(tt.wantErr))
			g.Expect(res).To(Equal(tt.want))

			// Re-fetch to assert on the persisted (patched) conditions.
			updatedObj := &sourcev1.GitRepository{}
			err = r.Get(ctx, key, updatedObj)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
		})
	}
}

// helpers

// initGitRepo creates an in-memory Git repository from the given fixture
// directory on the given branch, and pushes it to repositoryPath on the
// provided test server. The server's HTTP listener is started (and stopped
// again) if it is not already running.
func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) {
	fs := memfs.New()
	repo, err := gogit.Init(memory.NewStorage(), fs)
	if err != nil {
		return nil, err
	}

	branchRef := plumbing.NewBranchReferenceName(branch)
	if err = repo.CreateBranch(&config.Branch{
		Name:   branch,
		Remote: gogit.DefaultRemoteName,
		Merge:  branchRef,
	}); err != nil {
		return nil, err
	}

	err = commitFromFixture(repo, fixture)
	if err != nil {
		return nil, err
	}

	// Start HTTP only if the caller has not already done so; in that case
	// the listener is also torn down before returning.
	if server.HTTPAddress() == "" {
		if err = server.StartHTTP(); err != nil {
			return nil, err
		}
		defer server.StopHTTP()
	}
	if _, err = repo.CreateRemote(&config.RemoteConfig{
		Name: gogit.DefaultRemoteName,
		URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath},
	}); err != nil {
		return nil, err
	}

	if err = repo.Push(&gogit.PushOptions{
		RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"},
	}); err != nil {
		return nil, err
	}

	return repo, nil
}

// Test_commitFromFixture is a smoke test for the commitFromFixture helper
// itself, using an in-memory repository.
func Test_commitFromFixture(t *testing.T) {
	g := NewWithT(t)

	repo, err := gogit.Init(memory.NewStorage(), memfs.New())
	g.Expect(err).ToNot(HaveOccurred())

	err = commitFromFixture(repo, "testdata/git/repository")
	g.Expect(err).ToNot(HaveOccurred())
}

// commitFromFixture copies the fixture directory tree into the repository's
// worktree, stages everything, and creates a single commit authored by a
// static test identity.
func commitFromFixture(repo *gogit.Repository, fixture string) error {
	working, err := repo.Worktree()
	if err != nil {
		return err
	}
	fs := working.Filesystem

	if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// path[len(fixture):] strips the fixture root so files land at the
		// repository root.
		if info.IsDir() {
			return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode())
		}

		fileBytes, err := os.ReadFile(path)
		if err != nil {
			return err
		}

		ff, err := fs.Create(path[len(fixture):])
		if err != nil {
			return err
		}
		defer ff.Close()

		_, err = ff.Write(fileBytes)
		return err
	}); err != nil {
		return err
	}

	_, err = working.Add(".")
	if err != nil {
		return err
	}

	if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{
		Author: &object.Signature{
			Name:  "Jane Doe",
			Email: "jane@example.com",
			When:  time.Now(),
		},
	}); err != nil {
		return err
	}

	return nil
}

// remoteBranchForHead force-pushes the given head reference to the named
// branch on the "origin" remote.
func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error {
	refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch)
	return repo.Push(&gogit.PushOptions{
		RemoteName: "origin",
		RefSpecs:   []config.RefSpec{config.RefSpec(refSpec)},
		Force:      true,
	})
}

// remoteTagForHead creates an annotated tag pointing at head and pushes it
// to the remote.
func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error {
	if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{
		// Not setting this seems to make things flaky
		// Expected success, but got an error:
		//     <*errors.errorString | 0xc0000f6350>: {
		//         s: "tagger field is required",
		//     }
		//     tagger field is required
		Tagger: &object.Signature{
			Name:  "Jane Doe",
			Email: "jane@example.com",
			When:  time.Now(),
		},
		Message: tag,
	}); err != nil {
		return err
	}
	refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag)
	return repo.Push(&gogit.PushOptions{
		RefSpecs: []config.RefSpec{config.RefSpec(refSpec)},
	})
}

// remoteRefForHead sets an arbitrary reference name to head's hash and
// force-pushes it to the remote.
func remoteRefForHead(repo *gogit.Repository, head *plumbing.Reference, reference string) error {
	if err := repo.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName(reference), head.Hash())); err != nil {
		return err
	}
	if err := repo.Push(&gogit.PushOptions{
		RefSpecs: []config.RefSpec{
			// Leading "+" forces the update on the remote side.
			config.RefSpec("+" + reference + ":" + reference),
		},
	}); err != nil {
		return err
	}
	return nil
}

// TestGitRepositoryReconciler_statusConditions verifies how SummarizeAndPatch
// folds positive and negative sub-conditions into the Ready condition.
func TestGitRepositoryReconciler_statusConditions(t *testing.T) {
	tests := []struct {
		name             string
		beforeFunc       func(obj *sourcev1.GitRepository)
		assertConditions []metav1.Condition
		wantErr          bool
	}{
		{
			name: "multiple positive conditions",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
				conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit")
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
				*conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit"),
			},
		},
		{
			name: "multiple failures",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
				conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error")
				conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
			},
			assertConditions: []metav1.Condition{
				*conditions.FalseCondition(meta.ReadyCondition,
sourcev1.DirCreationFailedReason, "failed to create directory"),
				*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
				*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error"),
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
				*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
			},
			wantErr: true,
		},
		{
			name: "mixed positive and negative conditions",
			beforeFunc: func(obj *sourcev1.GitRepository) {
				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
			},
			assertConditions: []metav1.Condition{
				// The negative condition wins: Ready is False even though an
				// artifact is in storage.
				*conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
				*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.GitRepository{
				TypeMeta: metav1.TypeMeta{
					APIVersion: sourcev1.GroupVersion.String(),
					Kind:       sourcev1.GitRepositoryKind,
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "gitrepo",
					Namespace: "foo",
				},
			}

			clientBuilder := fakeclient.NewClientBuilder().
				WithScheme(testEnv.Scheme()).
				WithObjects(obj).
				WithStatusSubresource(&sourcev1.GitRepository{})

			c := clientBuilder.Build()

			serialPatcher := patch.NewSerialPatcher(obj, c)

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			ctx := context.TODO()
			summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher)
			summarizeOpts := []summarize.Option{
				summarize.WithConditions(gitRepositoryReadyCondition),
				summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition),
				summarize.WithReconcileResult(sreconcile.ResultSuccess),
				summarize.WithIgnoreNotFound(),
				summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
					RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
				}),
				summarize.WithPatchFieldOwner("source-controller"),
			}
			_, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
			g.Expect(err != nil).To(Equal(tt.wantErr))

			// Assert against the persisted object, not the in-memory copy.
			key := client.ObjectKeyFromObject(obj)
			g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
			g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
		})
	}
}

// TestGitRepositoryReconciler_notify verifies which Kubernetes events the
// notify helper emits: a NewArtifact event for a fresh artifact, a Succeeded
// event on recovery from failure, and no event when nothing changed or the
// reconciliation errored.
func TestGitRepositoryReconciler_notify(t *testing.T) {
	concreteCommit := git.Commit{
		Hash:    git.Hash("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"),
		Message: "test commit",
		Encoded: []byte("content"),
	}
	partialCommit := git.Commit{
		Hash: git.Hash("b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"),
	}

	// An error marked Ignore is treated as a no-op, not a failure.
	noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason")
	noopErr.Ignore = true

	tests := []struct {
		name             string
		res              sreconcile.Result
		resErr           error
		oldObjBeforeFunc func(obj *sourcev1.GitRepository)
		newObjBeforeFunc func(obj *sourcev1.GitRepository)
		commit           git.Commit
		wantEvent        string
	}{
		{
			name:   "error - no event",
			res:    sreconcile.ResultEmpty,
			resErr: errors.New("some error"),
		},
		{
			name:   "new artifact",
			res:    sreconcile.ResultSuccess,
			resErr: nil,
			newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
			},
			commit:    concreteCommit,
			wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
		},
		{
			name:   "recovery from failure",
			res:    sreconcile.ResultSuccess,
			resErr: nil,
			oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
			},
			newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			commit:    concreteCommit,
			wantEvent: "Normal Succeeded stored artifact for commit 'test commit'",
		},
		{
			// A changed artifact takes precedence over the recovery event.
			name:   "recovery and new artifact",
			res:    sreconcile.ResultSuccess,
			resErr: nil,
			oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
			},
			newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"}
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			commit:    concreteCommit,
			wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
		},
		{
			name:   "no updates",
			res:    sreconcile.ResultSuccess,
			resErr: nil,
			oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
		},
		{
			name:   "no-op error result",
			res:    sreconcile.ResultEmpty,
			resErr: noopErr,
			oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
			},
			newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"}
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			commit:    partialCommit, // no-op will always result in partial commit.
			wantEvent: "Normal Succeeded stored artifact for commit 'sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			recorder := record.NewFakeRecorder(32)

			oldObj := &sourcev1.GitRepository{}
			newObj := oldObj.DeepCopy()

			if tt.oldObjBeforeFunc != nil {
				tt.oldObjBeforeFunc(oldObj)
			}
			if tt.newObjBeforeFunc != nil {
				tt.newObjBeforeFunc(newObj)
			}

			reconciler := &GitRepositoryReconciler{
				EventRecorder: recorder,
				features:      features.FeatureGates(),
				patchOptions:  getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"),
			}
			reconciler.notify(ctx, oldObj, newObj, tt.commit, tt.res, tt.resErr)

			// Non-blocking receive: either exactly the expected event was
			// recorded, or none at all.
			select {
			case x, ok := <-recorder.Events:
				g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
				if tt.wantEvent != "" {
					g.Expect(x).To(ContainSubstring(tt.wantEvent))
				}
			default:
				if tt.wantEvent != "" {
					t.Errorf("expected some event to be emitted")
				}
			}
		})
	}
}

func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) {
	type dependency struct {
		name         string
		withArtifact bool
		conditions   []metav1.Condition
	}

	type include struct {
		name     string
		fromPath string
+ toPath string + shouldExist bool + } + + tests := []struct { + name string + dependencies []dependency + includes []include + beforeFunc func(obj *sourcev1.GitRepository) + wantErr bool + wantArtifactSet artifactSet + assertConditions []metav1.Condition + }{ + { + name: "Existing includes", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), + }, + }, + { + name: "b", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/", shouldExist: true}, + {name: "b", toPath: "b/", shouldExist: true}, + }, + wantErr: false, + wantArtifactSet: []*meta.Artifact{ + {Revision: "a"}, + {Revision: "b"}, + }, + }, + { + name: "Include get failure", + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + }, + }, + { + name: "Include without an artifact makes IncludeUnavailable=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: false, + conditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"), + }, + }, + { + name: "Outdated IncludeUnavailable is removed", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") + }, + assertConditions: []metav1.Condition{}, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var depObjs []client.Object + for _, d := range tt.dependencies { + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.name, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: d.conditions, + }, + } + if d.withArtifact { + obj.Status.Artifact = &meta.Artifact{ + Path: d.name + ".tar.gz", + Revision: d.name, + LastUpdateTime: metav1.Now(), + } + } + depObjs = append(depObjs, obj) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.GitRepository{}) + + if len(tt.dependencies) > 0 { + clientBuilder.WithObjects(depObjs...) + } + + r := &GitRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-include", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + }, + } + + for i, incl := range tt.includes { + incl := sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, + FromPath: incl.fromPath, + ToPath: incl.toPath, + } + tt.includes[i].fromPath = incl.GetFromPath() + tt.includes[i].toPath = incl.GetToPath() + obj.Spec.Include = append(obj.Spec.Include, incl) + } + + gotArtifactSet, err := r.fetchIncludes(ctx, obj) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + if !tt.wantErr && gotArtifactSet != nil { + g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse()) + } + }) + } +} + +func resetChmod(path string, dirMode os.FileMode, fileMode os.FileMode) error { + err := filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() && info.Mode() != dirMode { + 
os.Chmod(path, dirMode) + } else if !info.IsDir() && info.Mode() != fileMode { + os.Chmod(path, fileMode) + } + return nil + }) + if err != nil { + return fmt.Errorf("cannot reset file permissions: %v", err) + } + + return nil +} + +func TestGitRepositoryIncludeEqual(t *testing.T) { + tests := []struct { + name string + a sourcev1.GitRepositoryInclude + b sourcev1.GitRepositoryInclude + want bool + }{ + { + name: "empty", + want: true, + }, + { + name: "different refs", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "bar"}, + }, + want: false, + }, + { + name: "same refs", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + }, + want: true, + }, + { + name: "different from paths", + a: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + b: sourcev1.GitRepositoryInclude{FromPath: "bar"}, + want: false, + }, + { + name: "same from paths", + a: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + b: sourcev1.GitRepositoryInclude{FromPath: "foo"}, + want: true, + }, + { + name: "different to paths", + a: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + b: sourcev1.GitRepositoryInclude{ToPath: "bar"}, + want: false, + }, + { + name: "same to paths", + a: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + b: sourcev1.GitRepositoryInclude{ToPath: "foo"}, + want: true, + }, + { + name: "same all", + a: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo-ref"}, + FromPath: "foo-path", + ToPath: "bar-path", + }, + b: sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: "foo-ref"}, + FromPath: "foo-path", + ToPath: "bar-path", + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := 
NewWithT(t) + + g.Expect(gitRepositoryIncludeEqual(tt.a, tt.b)).To(Equal(tt.want)) + }) + } +} + +func TestGitContentConfigChanged(t *testing.T) { + tests := []struct { + name string + obj sourcev1.GitRepository + artifacts []*meta.Artifact + want bool + }{ + { + name: "no content config", + want: false, + }, + { + name: "unobserved ignore", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{Ignore: ptr.To("foo")}, + }, + want: true, + }, + { + name: "observed ignore", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{Ignore: ptr.To("foo")}, + Status: sourcev1.GitRepositoryStatus{ObservedIgnore: ptr.To("foo")}, + }, + want: false, + }, + { + name: "unobserved recurse submodules", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{RecurseSubmodules: true}, + }, + want: true, + }, + { + name: "observed recurse submodules", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{RecurseSubmodules: true}, + Status: sourcev1.GitRepositoryStatus{ObservedRecurseSubmodules: true}, + }, + want: false, + }, + { + name: "unobserved sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c"}}, + }, + want: true, + }, + { + name: "unobserved case sensitive sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/Z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: true, + }, + { + name: "observed sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: false, + }, + { + name: "observed sparse checkout with leading slash", + obj: sourcev1.GitRepository{ + Spec: 
sourcev1.GitRepositorySpec{SparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + }, + want: false, + }, + { + name: "unobserved include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, FromPath: "bar", ToPath: "baz"}, + }, + }, + }, + want: true, + }, + { + name: "observed include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: false, + }, + { + name: "observed include but different artifact revision", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "ccc", Digest: "bbb"}, + }, + want: true, + }, + { + name: "observed include but different artifact digest", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: 
"foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "ddd"}, + }, + want: true, + }, + { + name: "observed include but updated spec", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: true, + }, + { + name: "different number of include and observed include", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ddd"}, + }, + want: true, + }, + { + name: "different number of include and artifactset", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: 
meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + want: true, + }, + { + name: "different number of include and included artifacts", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Include: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + ObservedInclude: []sourcev1.GitRepositoryInclude{ + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}, + FromPath: "bar", + ToPath: "baz", + }, + { + GitRepositoryRef: meta.LocalObjectReference{Name: "foo2"}, + FromPath: "bar", + ToPath: "baz", + }, + }, + IncludedArtifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + }, + }, + }, + artifacts: []*meta.Artifact{ + {Revision: "aaa", Digest: "bbb"}, + {Revision: "ccc", Digest: "ccc"}, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + includes := artifactSet(tt.artifacts) + g.Expect(gitContentConfigChanged(&tt.obj, &includes)).To(Equal(tt.want)) + }) + } +} + +func Test_requiresVerification(t *testing.T) { + tests := []struct { + name string + obj *sourcev1.GitRepository + want bool + }{ 
+ { + name: "GitRepository without verification does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{}, + }, + want: false, + }, + { + name: "GitRepository with verification and no observed verification mode in status requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{}, + }, + }, + want: true, + }, + { + name: "GitRepository with HEAD verification and a verified tag requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag), + }, + }, + want: true, + }, + { + name: "GitRepository with tag and HEAD verification and a verified tag requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTag), + }, + }, + want: true, + }, + { + name: "GitRepository with tag verification and a verified HEAD requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitHEAD), + }, + }, + want: true, + }, + { + name: "GitRepository with tag and HEAD verification and a verified HEAD requires verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTagAndHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: 
ptrToVerificationMode(sourcev1.ModeGitHEAD), + }, + }, + want: true, + }, + { + name: "GitRepository with tag verification and a verified HEAD and tag does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitTag, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD), + }, + }, + want: false, + }, + { + name: "GitRepository with head verification and a verified HEAD and tag does not require verification", + obj: &sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{ + Verification: &sourcev1.GitRepositoryVerification{ + Mode: sourcev1.ModeGitHEAD, + }, + }, + Status: sourcev1.GitRepositoryStatus{ + SourceVerificationMode: ptrToVerificationMode(sourcev1.ModeGitTagAndHEAD), + }, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + verificationRequired := requiresVerification(tt.obj) + g.Expect(verificationRequired).To(Equal(tt.want)) + }) + } +} + +func ptrToVerificationMode(mode sourcev1.GitVerificationMode) *sourcev1.GitVerificationMode { + return &mode +} diff --git a/internal/controller/helmchart_controller.go b/internal/controller/helmchart_controller.go new file mode 100644 index 000000000..e969bf67a --- /dev/null +++ b/internal/controller/helmchart_controller.go @@ -0,0 +1,1457 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/opencontainers/go-digest" + "github.com/sigstore/cosign/v2/pkg/cosign" + helmgetter "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" + helmrepo "helm.sh/helm/v3/pkg/repo" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + 
"github.com/fluxcd/pkg/tar" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/cache" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/helm/chart" + "github.com/fluxcd/source-controller/internal/helm/getter" + "github.com/fluxcd/source-controller/internal/helm/repository" + soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" +) + +// helmChartReadyCondition contains all the conditions information +// needed for HelmChart Ready status conditions summary calculation. 
+var helmChartReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.BuildFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.BuildFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.BuildFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// helmChartFailConditions contains the conditions that represent a failure. 
+var helmChartFailConditions = []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.StorageOperationFailedCondition, +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// HelmChartReconciler reconciles a HelmChart object +type HelmChartReconciler struct { + client.Client + kuberecorder.EventRecorder + helper.Metrics + + RegistryClientGenerator RegistryClientGeneratorFunc + Storage *storage.Storage + Getters helmgetter.Providers + ControllerName string + + Cache *cache.Cache + TTL time.Duration + *cache.CacheRecorder + + patchOptions []patch.Option +} + +// RegistryClientGeneratorFunc is a function that returns a registry client +// and an optional file name. +// The file is used to store the registry client credentials. +// The caller is responsible for deleting the file. +type RegistryClientGeneratorFunc func(tlsConfig *tls.Config, isLogin, insecure bool) (*helmreg.Client, string, error) + +func (r *HelmChartReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(ctx, mgr, HelmChartReconcilerOptions{}) +} + +type HelmChartReconcilerOptions struct { + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// helmChartReconcileFunc is the function type for all the v1.HelmChart +// (sub)reconcile functions. The type implementations are grouped and +// executed serially to perform the complete reconcile of the object. 
+type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) + +func (r *HelmChartReconciler) SetupWithManagerAndOptions(ctx context.Context, mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { + r.patchOptions = getPatchOptions(helmChartReadyCondition.Owned, r.ControllerName) + + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, + r.indexHelmRepositoryByURL); err != nil { + return fmt.Errorf("failed setting index fields: %w", err) + } + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmChart{}, sourcev1.SourceIndexKey, + r.indexHelmChartBySource); err != nil { + return fmt.Errorf("failed setting index fields: %w", err) + } + + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.HelmChart{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), + )). + Watches( + &sourcev1.HelmRepository{}, + handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange), + builder.WithPredicates(SourceRevisionChangePredicate{}), + ). + Watches( + &sourcev1.GitRepository{}, + handler.EnqueueRequestsFromMapFunc(r.requestsForGitRepositoryChange), + builder.WithPredicates(SourceRevisionChangePredicate{}), + ). + Watches( + &sourcev1.Bucket{}, + handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange), + builder.WithPredicates(SourceRevisionChangePredicate{}), + ). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }). 
+ Complete(r) +} + +func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + start := time.Now() + log := ctrl.LoggerFrom(ctx) + + // Fetch the HelmChart + obj := &sourcev1.HelmChart{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the patch helper with the current version of the object. + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object after each reconciliation. + // NOTE: The final runtime result and error are set in this block. + defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmChartReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.ErrorActionHandler, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record duration metrics. + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition + // between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. 
+ if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("Reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil + return + } + + // Reconcile actual object + reconcilers := []helmChartReconcileFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) + return +} + +// reconcile iterates through the helmChartReconcileFunc tasks for the +// object. It returns early on the first call that returns +// reconcile.ResultRequeue, or produces an error. +func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) { + oldObj := obj.DeepCopy() + + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") + + var reconcileAtVal string + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + reconcileAtVal = v + } + + // Persist reconciling if generation differs or reconciliation is requested. + switch { + case obj.Generation != obj.Status.ObservedGeneration: + rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, + "processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest(): + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + } + + // Run the sub-reconcilers and build the result of reconciliation. 
+ var ( + build chart.Build + res sreconcile.Result + resErr error + ) + for _, rec := range reconcilers { + recResult, err := rec(ctx, sp, obj, &build) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + + r.notify(ctx, oldObj, obj, &build, res, resErr) + + return res, resErr +} + +// notify emits notification related to the reconciliation. +func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmChart, build *chart.Build, res sreconcile.Result, resErr error) { + // Notify successful reconciliation for new artifact and recovery from any + // failure. + if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { + annotations := map[string]string{ + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, + } + + // Notify on new artifact and failure recovery. + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + reasonForBuild(build), build.Summary()) + ctrl.LoggerFrom(ctx).Info(build.Summary()) + } else { + if sreconcile.FailureRecovery(oldObj, newObj, helmChartFailConditions) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + reasonForBuild(build), build.Summary()) + ctrl.LoggerFrom(ctx).Info(build.Summary()) + } + } + } +} + +// reconcileStorage ensures the current state of the storage matches the +// desired and previously observed state. 
+// +// The garbage collection is executed based on the flag configured settings and +// may remove files that are beyond their TTL or the maximum number of files +// to survive a collection cycle. +// If the Artifact in the Status of the object disappeared from the Storage, +// it is removed from the object. +// If the object does not have an Artifact in its Status, a Reconciling +// condition is added. +// The hostname of any URL in the Status of the object are updated, to ensure +// they match the Storage server hostname of current runtime. +func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) + conditions.Delete(obj, 
sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { + // Remove any failed verification condition. + // The reason is that a failing verification should be recalculated. + if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + } + + // Retrieve the source + s, err := r.getSource(ctx, obj) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get source: %w", err), + "SourceUnavailable", + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + + // Return Kubernetes client errors, but ignore others which can only be + // solved by a change in generation + if apierrs.ReasonForError(err) == metav1.StatusReasonUnknown { + return sreconcile.ResultEmpty, serror.NewStalling( + fmt.Errorf("failed to get source: %w", err), + "UnsupportedSourceKind", + ) + } + return sreconcile.ResultEmpty, e + } + + // Assert source has an artifact + if s.GetArtifact() == nil || !r.Storage.ArtifactExist(*s.GetArtifact()) { + // Set the condition to indicate that the source has no artifact for all types except OCI HelmRepository + if helmRepo, ok := s.(*sourcev1.HelmRepository); !ok || helmRepo.Spec.Type != sourcev1.HelmRepositoryTypeOCI { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact", + "no artifact 
available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "NoSourceArtifact", + "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + return sreconcile.ResultRequeue, nil + } + } + + if s.GetArtifact() != nil { + // Record current artifact revision as last observed + obj.Status.ObservedSourceArtifactRevision = s.GetArtifact().Revision + } + + // Defer observation of build result + defer func() { + // Record both success and error observations on the object + observeChartBuild(ctx, sp, r.patchOptions, obj, build, retErr) + + // If we actually build a chart, take a historical note of any dependencies we resolved. + // The reason this is a done conditionally, is because if we have a cached one in storage, + // we can not recover this information (and put it in a condition). Which would result in + // a sudden (partial) disappearance of observed state. + // TODO(hidde): include specific name/version information? 
+ if depNum := build.ResolvedDependencies; build.Complete() && depNum > 0 { + r.Eventf(obj, eventv1.EventTypeTrace, "ResolvedDependencies", "resolved %d chart dependencies", depNum) + } + + // Handle any build error + if retErr != nil { + if buildErr := new(chart.BuildError); errors.As(retErr, &buildErr) { + retErr = serror.NewGeneric( + buildErr, + buildErr.Reason.Reason, + ) + if chart.IsPersistentBuildErrorReason(buildErr.Reason) { + retErr = serror.NewStalling( + buildErr, + buildErr.Reason.Reason, + ) + } + } + } + }() + + // Perform the build for the chart source type + switch typedSource := s.(type) { + case *sourcev1.HelmRepository: + return r.buildFromHelmRepository(ctx, obj, typedSource, build) + case *sourcev1.GitRepository, *sourcev1.Bucket: + return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) + default: + // Ending up here should generally not be possible + // as getSource already validates + return sreconcile.ResultEmpty, nil + } +} + +// buildFromHelmRepository attempts to pull and/or package a Helm chart with +// the specified data from the v1.HelmRepository and v1.HelmChart +// objects. +// In case of a failure it records v1.FetchFailedCondition on the chart +// object, and returns early. 
+func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, + repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { + // Used to login with the repository declared provider + ctxTimeout, cancel := context.WithTimeout(ctx, repo.GetTimeout()) + defer cancel() + + normalizedURL, err := repository.NormalizeURL(repo.Spec.URL) + if err != nil { + return chartRepoConfigErrorReturn(err, obj) + } + + clientOpts, certsTmpDir, err := getter.GetClientOpts(ctxTimeout, r.Client, repo, normalizedURL) + if err != nil && !errors.Is(err, getter.ErrDeprecatedTLSConfig) { + e := serror.NewGeneric( + err, + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + if certsTmpDir != "" { + defer func() { + if err := os.RemoveAll(certsTmpDir); err != nil { + r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason, + "failed to delete temporary certificates directory: %s", err) + } + }() + } + + getterOpts := clientOpts.GetterOpts + + // Initialize the chart repository + var chartRepo repository.Downloader + switch repo.Spec.Type { + case sourcev1.HelmRepositoryTypeOCI: + if !helmreg.IsOCI(normalizedURL) { + err := fmt.Errorf("invalid OCI registry URL: %s", normalizedURL) + return chartRepoConfigErrorReturn(err, obj) + } + + // with this function call, we create a temporary file to store the credentials if needed. + // this is needed because otherwise the credentials are stored in ~/.docker/config.json. 
+ // TODO@souleb: remove this once the registry move to Oras v2 + // or rework to enable reusing credentials to avoid the unneccessary handshake operations + registryClient, credentialsFile, err := r.RegistryClientGenerator(clientOpts.TlsConfig, clientOpts.MustLoginToRegistry(), repo.Spec.Insecure) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to construct Helm client: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + if credentialsFile != "" { + defer func() { + if err := os.Remove(credentialsFile); err != nil { + r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason, + "failed to delete temporary credentials file: %s", err) + } + }() + } + + var verifiers []soci.Verifier + if obj.Spec.Verify != nil { + provider := obj.Spec.Verify.Provider + verifiers, err = r.makeVerifiers(ctx, obj, *clientOpts) + if err != nil { + if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { + provider = fmt.Sprintf("%s keyless", provider) + } + e := serror.NewGeneric( + fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err), + sourcev1.VerificationError, + ) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Tell the chart repository to use the OCI client with the configured getter + getterOpts = append(getterOpts, helmgetter.WithRegistryClient(registryClient)) + chartRepoOpts := []repository.OCIChartRepositoryOption{ + repository.WithOCIGetter(r.Getters), + repository.WithOCIGetterOptions(getterOpts), + repository.WithOCIRegistryClient(registryClient), + repository.WithVerifiers(verifiers), + } + if repo.Spec.Insecure { + chartRepoOpts = append(chartRepoOpts, repository.WithInsecureHTTP()) + } + + ociChartRepo, err := repository.NewOCIChartRepository(normalizedURL, chartRepoOpts...) 
+ if err != nil { + return chartRepoConfigErrorReturn(err, obj) + } + + // If login options are configured, use them to login to the registry + // The OCIGetter will later retrieve the stored credentials to pull the chart + if clientOpts.MustLoginToRegistry() { + err = ociChartRepo.Login(clientOpts.RegLoginOpts...) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to login to OCI registry: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + chartRepo = ociChartRepo + default: + httpChartRepo, err := repository.NewChartRepository(normalizedURL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, clientOpts.TlsConfig, getterOpts...) + if err != nil { + return chartRepoConfigErrorReturn(err, obj) + } + + // NB: this needs to be deferred first, as otherwise the Index will disappear + // before we had a chance to cache it. + defer func() { + if err := httpChartRepo.Clear(); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to clear Helm repository index") + } + }() + + // Attempt to load the index from the cache. + if r.Cache != nil { + if index, ok := r.Cache.Get(repo.GetArtifact().Path); ok { + r.IncCacheEvents(cache.CacheEventTypeHit, repo.Name, repo.Namespace) + r.Cache.SetExpiration(repo.GetArtifact().Path, r.TTL) + httpChartRepo.Index = index.(*helmrepo.IndexFile) + } else { + r.IncCacheEvents(cache.CacheEventTypeMiss, repo.Name, repo.Namespace) + defer func() { + // If we succeed in loading the index, cache it. 
+ if httpChartRepo.Index != nil { + if err = r.Cache.Set(repo.GetArtifact().Path, httpChartRepo.Index, r.TTL); err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) + } + } + }() + } + } + chartRepo = httpChartRepo + } + + // Construct the chart builder with scoped configuration + cb := chart.NewRemoteBuilder(chartRepo) + opts := chart.BuildOptions{ + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, + // The remote builder will not attempt to download the chart if + // an artifact exists with the same name and version and `Force` is false. + // It will however try to verify the chart if `obj.Spec.Verify` is set, at every reconciliation. + Verify: obj.Spec.Verify != nil && obj.Spec.Verify.Provider != "", + } + if artifact := obj.GetArtifact(); artifact != nil { + opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles + } + + // Set the VersionMetadata to the object's Generation if ValuesFiles is defined + // This ensures changes can be noticed by the Artifact consumer + if len(opts.GetValuesFiles()) > 0 { + opts.VersionMetadata = strconv.FormatInt(obj.Generation, 10) + } + + // Build the chart + ref := chart.RemoteReference{Name: obj.Spec.Chart, Version: obj.Spec.Version} + build, err := cb.Build(ctx, ref, util.TempPathForObj("", ".tgz", obj), opts) + if err != nil { + return sreconcile.ResultEmpty, err + } + + *b = *build + return sreconcile.ResultSuccess, nil +} + +// buildFromTarballArtifact attempts to pull and/or package a Helm chart with +// the specified data from the v1.HelmChart object and the given +// v1.Artifact. +// In case of a failure it records v1.FetchFailedCondition on the chart +// object, and returns early. 
+func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source meta.Artifact, b *chart.Build) (sreconcile.Result, error) { + // Create temporary working directory + tmpDir, err := util.TempDirForObj("", obj) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create temporary working directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + defer os.RemoveAll(tmpDir) + + // Create directory to untar source into + sourceDir := filepath.Join(tmpDir, "source") + if err := os.Mkdir(sourceDir, 0o700); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create directory to untar source into: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Open the tarball artifact file and untar files into working directory + f, err := os.Open(r.Storage.LocalPath(source)) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to open source artifact: %w", err), + sourcev1.ReadOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + if err = tar.Untar(f, sourceDir, tar.WithMaxUntarSize(-1)); err != nil { + _ = f.Close() + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("artifact untar error: %w", err), + meta.FailedReason, + ) + } + if err = f.Close(); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("artifact close error: %w", err), + meta.FailedReason, + ) + } + + // Setup dependency manager + dm := chart.NewDependencyManager( + chart.WithDownloaderCallback(r.namespacedChartRepositoryCallback(ctx, obj.GetName(), obj.GetNamespace())), + ) + defer func() { + err := dm.Clear() + if err != nil { + r.eventLogf(ctx, obj, 
corev1.EventTypeWarning, meta.FailedReason, + "dependency manager cleanup error: %s", err) + } + }() + + // Configure builder options, including any previously cached chart + opts := chart.BuildOptions{ + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, + } + if artifact := obj.GetArtifact(); artifact != nil { + opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles + } + + // Configure revision metadata for chart build if we should react to revision changes + if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { + rev := source.Revision + if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { + rev = git.ExtractHashFromRevision(rev).String() + } + if obj.Spec.SourceRef.Kind == sourcev1.BucketKind { + if dig := digest.Digest(rev); dig.Validate() == nil { + rev = dig.Encoded() + } + } + if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { + // The SemVer from the metadata is at times used in e.g. the label metadata for a resource + // in a chart, which has a limited length of 63 characters. + // To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and + // even more for SHA-2 for a chart from a Bucket), we shorten this to the first 12 + // characters taken from the hex. + // For SHA-1, this has proven to be unique in the Linux kernel with over 875.000 commits + // (http://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#Short-SHA-1). + // Note that for a collision to be problematic, it would need to happen right after the + // previous SHA for the artifact, which is highly unlikely, if not virtually impossible. 
+ // Ref: https://en.wikipedia.org/wiki/Birthday_attack + rev = rev[0:12] + } + opts.VersionMetadata = rev + } + // Set the VersionMetadata to the object's Generation if ValuesFiles is defined, + // this ensures changes can be noticed by the Artifact consumer + if len(opts.GetValuesFiles()) > 0 { + if opts.VersionMetadata != "" { + opts.VersionMetadata += "." + } + opts.VersionMetadata += strconv.FormatInt(obj.Generation, 10) + } + + // Build chart + cb := chart.NewLocalBuilder(dm) + build, err := cb.Build(ctx, chart.LocalReference{ + WorkDir: sourceDir, + Path: obj.Spec.Chart, + }, util.TempPathForObj("", ".tgz", obj), opts) + if err != nil { + return sreconcile.ResultEmpty, err + } + + *b = *build + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact archives a new Artifact to the Storage, if the current +// (Status) data on the object does not match the given. +// +// The inspection of the given data to the object is differed, ensuring any +// stale observations like v1.ArtifactOutdatedCondition are removed. +// If the given Artifact does not differ from the object's current, it returns +// early. +// On a successful archive, the Artifact in the Status of the object is set, +// and the symlink in the Storage is updated to its path. +func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { + // Without a complete chart build, there is little to reconcile + if !b.Complete() { + return sreconcile.ResultRequeue, nil + } + + // Set the ArtifactInStorageCondition if there's no drift. 
+ defer func() { + if obj.Status.ObservedChartName == b.Name && obj.GetArtifact().HasRevision(b.Version) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, reasonForBuild(b), "%s", b.Summary()) + } + }() + + // Create artifact from build data + artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), b.Version, fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) + + // Return early if the build path equals the current artifact path + if curArtifact := obj.GetArtifact(); curArtifact != nil && r.Storage.LocalPath(*curArtifact) == b.Path { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil + } + + // Garbage collect chart build once persisted to storage + defer os.Remove(b.Path) + + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + unlock, err := r.Storage.Lock(artifact) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + sourcev1.AcquireLockFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + defer unlock() + + // Copy the packaged chart to the artifact path + if err = r.Storage.CopyFromPath(&artifact, b.Path); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to copy Helm chart to storage: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Record it 
on the object + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.ObservedChartName = b.Name + if obj.Spec.IgnoreMissingValuesFiles { + obj.Status.ObservedValuesFiles = b.ValuesFiles + } else { + obj.Status.ObservedValuesFiles = nil + } + + // Update symlink on a "best effort" basis + symURL, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + "failed to update status URL symlink: %s", err) + } + if symURL != "" { + obj.Status.URL = symURL + } + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// getSource returns the v1beta1.Source for the given object, or an error describing why the source could not be +// returned. +func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmChart) (sourcev1.Source, error) { + namespacedName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SourceRef.Name, + } + var s sourcev1.Source + switch obj.Spec.SourceRef.Kind { + case sourcev1.HelmRepositoryKind: + var repo sourcev1.HelmRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.GitRepositoryKind: + var repo sourcev1.GitRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.BucketKind: + var bucket sourcev1.Bucket + if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil { + return nil, err + } + s = &bucket + default: + return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{ + sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind}) + } + return s, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. 
+// Removing the finalizer from the object if successful. +func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmChart) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. +// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. +func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// namespacedChartRepositoryCallback returns a chart.GetChartDownloaderCallback 
scoped to the given namespace. +// The returned callback returns a repository.Downloader configured with the retrieved v1beta1.HelmRepository, +// or a shim with defaults if no object could be found. +// The callback returns an object with a state, so the caller has to do the necessary cleanup. +func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Context, name, namespace string) chart.GetChartDownloaderCallback { + return func(url string) (repository.Downloader, error) { + normalizedURL, err := repository.NormalizeURL(url) + if err != nil { + return nil, err + } + obj, err := r.resolveDependencyRepository(ctx, url, namespace) + if err != nil { + // Return Kubernetes client errors, but ignore others + if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown { + return nil, err + } + obj = &sourcev1.HelmRepository{ + Spec: sourcev1.HelmRepositorySpec{ + URL: url, + Timeout: &metav1.Duration{Duration: 60 * time.Second}, + }, + } + } + + // Used to login with the repository declared provider + ctxTimeout, cancel := context.WithTimeout(ctx, obj.GetTimeout()) + defer cancel() + + clientOpts, certsTmpDir, err := getter.GetClientOpts(ctxTimeout, r.Client, obj, normalizedURL) + if err != nil && !errors.Is(err, getter.ErrDeprecatedTLSConfig) { + return nil, err + } + getterOpts := clientOpts.GetterOpts + + var chartRepo repository.Downloader + if helmreg.IsOCI(normalizedURL) { + registryClient, credentialsFile, err := r.RegistryClientGenerator(clientOpts.TlsConfig, clientOpts.MustLoginToRegistry(), obj.Spec.Insecure) + if err != nil { + return nil, fmt.Errorf("failed to create registry client: %w", err) + } + + var errs []error + // Tell the chart repository to use the OCI client with the configured getter + getterOpts = append(getterOpts, helmgetter.WithRegistryClient(registryClient)) + ociChartRepo, err := repository.NewOCIChartRepository(normalizedURL, repository.WithOCIGetter(r.Getters), + repository.WithOCIGetterOptions(getterOpts), + 
repository.WithOCIRegistryClient(registryClient), + repository.WithCertificatesStore(certsTmpDir), + repository.WithCredentialsFile(credentialsFile)) + if err != nil { + errs = append(errs, fmt.Errorf("failed to create OCI chart repository: %w", err)) + // clean up the credentialsFile + if credentialsFile != "" { + if err := os.Remove(credentialsFile); err != nil { + errs = append(errs, err) + } + } + return nil, kerrors.NewAggregate(errs) + } + + // If login options are configured, use them to login to the registry + // The OCIGetter will later retrieve the stored credentials to pull the chart + if clientOpts.MustLoginToRegistry() { + err = ociChartRepo.Login(clientOpts.RegLoginOpts...) + if err != nil { + errs = append(errs, fmt.Errorf("failed to login to OCI chart repository: %w", err)) + // clean up the credentialsFile + errs = append(errs, ociChartRepo.Clear()) + return nil, kerrors.NewAggregate(errs) + } + } + + chartRepo = ociChartRepo + } else { + httpChartRepo, err := repository.NewChartRepository(normalizedURL, "", r.Getters, clientOpts.TlsConfig, getterOpts...) + if err != nil { + return nil, err + } + + if artifact := obj.GetArtifact(); artifact != nil { + httpChartRepo.Path = r.Storage.LocalPath(*artifact) + + // Attempt to load the index from the cache. 
+ if r.Cache != nil { + if index, ok := r.Cache.Get(artifact.Path); ok { + r.IncCacheEvents(cache.CacheEventTypeHit, name, namespace) + r.Cache.SetExpiration(artifact.Path, r.TTL) + httpChartRepo.Index = index.(*helmrepo.IndexFile) + } else { + r.IncCacheEvents(cache.CacheEventTypeMiss, name, namespace) + if err := httpChartRepo.LoadFromPath(); err != nil { + return nil, err + } + r.Cache.Set(artifact.Path, httpChartRepo.Index, r.TTL) + } + } + } + + chartRepo = httpChartRepo + } + + return chartRepo, nil + } +} + +func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { + listOpts := []client.ListOption{ + client.InNamespace(namespace), + client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, + client.Limit(1), + } + var list sourcev1.HelmRepositoryList + err := r.Client.List(ctx, &list, listOpts...) + if err != nil { + return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) + } + if len(list.Items) > 0 { + return &list.Items[0], nil + } + return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) +} + +func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { + repo, ok := o.(*sourcev1.HelmRepository) + if !ok { + panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) + } + u, err := repository.NormalizeURL(repo.Spec.URL) + if u != "" && err == nil { + return []string{u} + } + return nil +} + +func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string { + hc, ok := o.(*sourcev1.HelmChart) + if !ok { + panic(fmt.Sprintf("Expected a HelmChart, got %T", o)) + } + return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)} +} + +func (r *HelmChartReconciler) requestsForHelmRepositoryChange(ctx context.Context, o client.Object) []reconcile.Request { + repo, ok := o.(*sourcev1.HelmRepository) + if !ok { + 
ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a HelmRepository, got %T", o), "failed to get requests for HelmRepository change") + return nil + } + + // If we do not have an artifact, we have no requests to make + if repo.GetArtifact() == nil { + return nil + } + + var list sourcev1.HelmChartList + if err := r.List(ctx, &list, client.MatchingFields{ + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name), + }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for HelmRepository change") + return nil + } + + var reqs []reconcile.Request + for i, v := range list.Items { + if v.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) + } + } + return reqs +} + +func (r *HelmChartReconciler) requestsForGitRepositoryChange(ctx context.Context, o client.Object) []reconcile.Request { + repo, ok := o.(*sourcev1.GitRepository) + if !ok { + ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a GitRepository, got %T", o), + "failed to get reconcile requests for GitRepository change") + return nil + } + + // If we do not have an artifact, we have no requests to make + if repo.GetArtifact() == nil { + return nil + } + + var list sourcev1.HelmChartList + if err := r.List(ctx, &list, client.MatchingFields{ + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name), + }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for GitRepository change") + return nil + } + + var reqs []reconcile.Request + for i, v := range list.Items { + if !repo.GetArtifact().HasRevision(v.Status.ObservedSourceArtifactRevision) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) + } + } + return reqs +} + +func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o client.Object) []reconcile.Request { + bucket, ok := 
o.(*sourcev1.Bucket) + if !ok { + ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a Bucket, got %T", o), + "failed to get reconcile requests for Bucket change") + return nil + } + + // If we do not have an artifact, we have no requests to make + if bucket.GetArtifact() == nil { + return nil + } + + var list sourcev1.HelmChartList + if err := r.List(ctx, &list, client.MatchingFields{ + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name), + }); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for Bucket change") + return nil + } + + var reqs []reconcile.Request + for i, v := range list.Items { + if !bucket.GetArtifact().HasRevision(v.Status.ObservedSourceArtifactRevision) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&list.Items[i])}) + } + } + return reqs +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// observeChartBuild records the observation on the given given build and error on the object. 
func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *sourcev1.HelmChart, build *chart.Build, err error) {
	if build.HasMetadata() {
		// A new chart name or version means the current artifact (if any) is
		// outdated; surface progress immediately via a serial patch.
		if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) {
			if obj.GetArtifact() != nil {
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", "%s", build.Summary())
			}
			rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", build.Summary())
			if err := sp.Patch(ctx, obj, pOpts...); err != nil {
				ctrl.LoggerFrom(ctx).Error(err, "failed to patch")
			}
		}
	}

	if build.Complete() {
		// A complete build clears any previous fetch/build failures.
		conditions.Delete(obj, sourcev1.FetchFailedCondition)
		conditions.Delete(obj, sourcev1.BuildFailedCondition)
		if build.VerifiedResult == soci.VerificationResultSuccess {
			conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version %s", build.Version)
		}
	}

	// Verification was disabled (or removed from spec): drop any stale
	// SourceVerified condition.
	if obj.Spec.Verify == nil {
		conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
	}

	if err != nil {
		// Normalize arbitrary errors into a chart.BuildError so the reason
		// can drive the condition selection below.
		var buildErr *chart.BuildError
		if ok := errors.As(err, &buildErr); !ok {
			buildErr = &chart.BuildError{
				Reason: chart.ErrUnknown,
				Err:    err,
			}
		}

		switch buildErr.Reason {
		case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage:
			// These errors occur during the build of the chart: mark the
			// build failed, not the fetch.
			conditions.Delete(obj, sourcev1.FetchFailedCondition)
			conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr)
		case chart.ErrChartVerification:
			// Verification failure is both a build failure and an explicit
			// negative SourceVerified condition.
			conditions.Delete(obj, sourcev1.FetchFailedCondition)
			conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr)
			conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "%s", buildErr)
		default:
			// Any other reason is treated as a fetch failure.
			conditions.Delete(obj, sourcev1.BuildFailedCondition)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, "%s", buildErr)
		}
		return
	}
}

// reasonForBuild returns the condition reason matching the outcome of the
// given build: empty for an incomplete build, package-succeeded when the
// chart was (re)packaged, pull-succeeded otherwise.
func reasonForBuild(build *chart.Build) string {
	if !build.Complete() {
		return ""
	}
	if build.Packaged {
		return sourcev1.ChartPackageSucceededReason
	}
	return sourcev1.ChartPullSucceededReason
}

// chartRepoConfigErrorReturn marks the object FetchFailed and returns a
// stalling error for Helm repository client construction failures. URL
// errors get a dedicated URLInvalid reason; anything else is generic.
func chartRepoConfigErrorReturn(err error, obj *sourcev1.HelmChart) (sreconcile.Result, error) {
	switch err.(type) {
	case *url.Error:
		e := serror.NewStalling(
			fmt.Errorf("invalid Helm repository URL: %w", err),
			sourcev1.URLInvalidReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	default:
		e := serror.NewStalling(
			fmt.Errorf("failed to construct Helm client: %w", err),
			meta.FailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
}

// makeVerifiers returns a list of verifiers for the given chart.
// The provider in obj.Spec.Verify selects between cosign (key-based from a
// Secret's *.pub entries, or keyless with optional OIDC identity matchers)
// and notation (trust policy + *.crt/*.pem root certificates from a Secret).
func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *sourcev1.HelmChart, clientOpts getter.ClientOpts) ([]soci.Verifier, error) {
	var verifiers []soci.Verifier
	verifyOpts := []remote.Option{}

	// Prefer an explicit authenticator; fall back to the keychain.
	if clientOpts.Authenticator != nil {
		verifyOpts = append(verifyOpts, remote.WithAuth(clientOpts.Authenticator))
	} else {
		verifyOpts = append(verifyOpts, remote.WithAuthFromKeychain(clientOpts.Keychain))
	}

	switch obj.Spec.Verify.Provider {
	case "cosign":
		defaultCosignOciOpts := []scosign.Options{
			scosign.WithRemoteOptions(verifyOpts...),
		}

		// get the public keys from the given secret
		if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil {

			verifySecret := types.NamespacedName{
				Namespace: obj.Namespace,
				Name:      secretRef.Name,
			}

			pubSecret, err := r.retrieveSecret(ctx, verifySecret)
			if err != nil {
				return nil, err
			}

			for k, data := range pubSecret.Data {
				// search for public keys in the secret: one verifier per
				// *.pub entry.
				if strings.HasSuffix(k, ".pub") {
					verifier, err := scosign.NewCosignVerifier(ctx, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...)
					if err != nil {
						return nil, err
					}
					verifiers = append(verifiers, verifier)
				}
			}

			if len(verifiers) == 0 {
				return nil, fmt.Errorf("no public keys found in secret '%s'", verifySecret.String())
			}
			return verifiers, nil
		}

		// if no secret is provided, add a keyless verifier
		var identities []cosign.Identity
		for _, match := range obj.Spec.Verify.MatchOIDCIdentity {
			identities = append(identities, cosign.Identity{
				IssuerRegExp:  match.Issuer,
				SubjectRegExp: match.Subject,
			})
		}
		defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities))

		verifier, err := scosign.NewCosignVerifier(ctx, defaultCosignOciOpts...)
		if err != nil {
			return nil, err
		}
		verifiers = append(verifiers, verifier)
		return verifiers, nil
	case "notation":
		// get the public keys from the given secret; notation requires one.
		secretRef := obj.Spec.Verify.SecretRef

		if secretRef == nil {
			return nil, fmt.Errorf("verification secret cannot be empty: '%s'", obj.Name)
		}

		verifySecret := types.NamespacedName{
			Namespace: obj.Namespace,
			Name:      secretRef.Name,
		}

		pubSecret, err := r.retrieveSecret(ctx, verifySecret)
		if err != nil {
			return nil, err
		}

		// The trust policy document must be present under the well-known key.
		data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey]
		if !ok {
			return nil, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String())
		}

		var doc trustpolicy.Document

		if err := json.Unmarshal(data, &doc); err != nil {
			return nil, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err)
		}

		// Collect root certificates from *.crt / *.pem entries.
		// NOTE: the loop variable shadows the trust-policy `data` above,
		// which is intentional here but easy to misread.
		var certs [][]byte

		for k, data := range pubSecret.Data {
			if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") {
				certs = append(certs, data)
			}
		}

		if certs == nil {
			return nil, fmt.Errorf("no certificates found in secret '%s'", verifySecret.String())
		}

		trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx))
		defaultNotationOciOpts := []notation.Options{
			notation.WithTrustPolicy(trustPolicy),
			notation.WithRemoteOptions(verifyOpts...),
			notation.WithAuth(clientOpts.Authenticator),
			notation.WithKeychain(clientOpts.Keychain),
			notation.WithInsecureRegistry(clientOpts.Insecure),
			notation.WithLogger(ctrl.LoggerFrom(ctx)),
			notation.WithRootCertificates(certs),
		}

		verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...)
		if err != nil {
			return nil, err
		}
		verifiers = append(verifiers, verifier)
		return verifiers, nil
	default:
		return nil, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider)
	}
}

// retrieveSecret retrieves a secret from the specified namespace with the given secret name.
// It returns the retrieved secret and any error encountered during the retrieval process.
func (r *HelmChartReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) {

	var pubSecret corev1.Secret

	if err := r.Get(ctx, verifySecret, &pubSecret); err != nil {
		return corev1.Secret{}, err
	}
	return pubSecret, nil
}
diff --git a/internal/controller/helmchart_controller_test.go b/internal/controller/helmchart_controller_test.go
new file mode 100644
index 000000000..190a9f8b5
--- /dev/null
+++ b/internal/controller/helmchart_controller_test.go
@@ -0,0 +1,3515 @@
+/*
+Copyright 2020 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controller + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/notaryproject/notation-core-go/signature/cose" + "github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + nr "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" + "github.com/sigstore/cosign/v2/pkg/cosign" + hchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + helmreg "helm.sh/helm/v3/pkg/registry" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + oras "oras.land/oras-go/v2/registry/remote" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck 
"github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/helm/chart" + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/registry" + "github.com/fluxcd/source-controller/internal/oci" + snotation "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +func TestHelmChartReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "helmchart-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + helmchart := &sourcev1.HelmChart{} + helmchart.Name = "test-helmchart" + helmchart.Namespace = namespaceName + helmchart.Spec = sourcev1.HelmChartSpec{ + Interval: metav1.Duration{Duration: interval}, + Chart: "foo", + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: "HelmRepository", + Name: "bar", + }, + } + // Add a test finalizer to prevent the object from getting deleted. + helmchart.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, helmchart)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. 
+ g.Expect(k8sClient.Delete(ctx, helmchart)).NotTo(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(helmchart)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestHelmChartReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + chartPath = "testdata/charts/helmchart" + ) + + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + g.Expect(serverFactory.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed()) + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + tests := []struct { + name string + beforeFunc func(repository *sourcev1.HelmRepository) + assertFunc func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + }{ + { + name: "Reconciles chart build", + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + origObj := obj.DeepCopy() + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmChart to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. 
+ u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + // Check if the cache contains the index. + repoKey := client.ObjectKey{Name: repository.Name, Namespace: repository.Namespace} + err = testEnv.Get(ctx, repoKey, repository) + g.Expect(err).ToNot(HaveOccurred()) + _, found := testCache.Get(repository.GetArtifact().Path) + g.Expect(found).To(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. + // NOTE: Since the object is already created when received in + // this assertFunc, reset the ResourceVersion from the object + // before recreating it to avoid API server error. 
+ obj = origObj.DeepCopy() + obj.ResourceVersion = "" + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(repository *sourcev1.HelmRepository) { + repository.Spec.URL = "https://unsupported/foo://" // Invalid URL + }, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + // Wait for HelmChart to be FetchFailed == true + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { + return false + } + // observedGeneration is -1 because we have no successful reconciliation + return obj.Status.ObservedGeneration == -1 + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }, + }, + { + name: "Stalling on invalid oci repository URL", + beforeFunc: func(repository *sourcev1.HelmRepository) { + repository.Spec.URL = strings.Replace(repository.Spec.URL, "http", "oci", 1) + }, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + // Wait for HelmChart to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) { + return false + } + // observedGeneration is -1 because we have no successful reconciliation + return 
obj.Status.ObservedGeneration == -1 + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer server.Stop() + + ns, err := testEnv.CreateNamespace(ctx, "helmchart") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + repository := sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(&repository) + } + + g.Expect(testEnv.CreateAndWait(ctx, &repository)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, &repository)).To(Succeed()) }() + + obj := sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmChartSpec{ + Chart: chartName, + Version: chartVersion, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, &obj)).To(Succeed()) + + if tt.assertFunc != nil { + tt.assertFunc(g, &obj, &repository) + } + }) + } +} + +func TestHelmChartReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart, storage 
*storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + 
storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = 
"sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + defer func() { + g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed()) + }() + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, nil) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmChartReconciler_reconcileSource(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + gitArtifact := &meta.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(st.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Source + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Observes Artifact revision and build result", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, 
meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Existing artifact makes AritfactOutdated=True", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + }, + { + name: "Error on unavailable source", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: 
"unavailable", + Kind: sourcev1.GitRepositoryKind, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + })) + }, + }, + { + name: "Stalling on unsupported source kind", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + Kind: "Unsupported", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + { + name: "Stalling on persistent build error", + source: &sourcev1.GitRepository{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Spec.ValuesFiles = []string{"invalid.yaml"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + { + name: "ResultRequeue when source artifact is unavailable", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{}, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.ObservedSourceArtifactRevision = "foo" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultRequeue, + assertFunc: func(g *WithT, build chart.Build, obj 
sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + })) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.source != nil { + clientBuilder.WithRuntimeObjects(tt.source) + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: st, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "chart", + Namespace: "default", + Generation: 1, + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(&obj) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), &obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), &obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(&obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, &obj, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b, obj) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, &obj) + }) + } +} + +func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { + g := NewWithT(t) + + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + higherChartVersion = "0.3.0" + chartPath = "testdata/charts/helmchart" + ) + + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + for _, ver := range []string{chartVersion, higherChartVersion} { + g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed()) + } + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + type options struct { + username string + password string + } + + tests := []struct { + name string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = "helmchart" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + server: options{ + username: "foo", + password: "bar", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte("foo"), + "password": []byte("bar"), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, 
repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Uses artifact as build cache with observedValuesFiles", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", 
"override.yaml"})) + }, + }, + { + name: "Sets Generation as VersionMetadata with values files", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Generation = 3 + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Missing values files are an error", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.ValuesFiles = []string{"missing.yaml"} + }, + wantErr: &chart.BuildError{Err: errors.New("values files merge error: failed to merge chart values: no values file found at path 'missing.yaml'")}, + }, + { + name: "All missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"missing.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Partial missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + 
obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml", "invalid.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "file://unsupported" // Unsupported protocol + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "://unsupported" // Invalid URL + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer server.Stop() + + if 
len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) + } + + testStorage, err := newTestStorage(server) + g.Expect(err).ToNot(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + Timeout: &metav1.Duration{Duration: timeout}, + }, + Status: sourcev1.HelmRepositoryStatus{ + Artifact: &meta.Artifact{ + Path: "index.yaml", + }, + }, + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmChartSpec{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } + }) + } +} + +func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { 
+ g := NewWithT(t) + + tmpDir := t.TempDir() + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).NotTo(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, testRegistryServer, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build with docker repository credentials", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(`{"auths":{"` + + testRegistryServer.registryHost + `":{"` + + `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + 
g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte(testRegistryUsername), + "password": []byte(testRegistryPassword), + }, + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository 
*sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(metadata.Name)) + g.Expect(build.Version).To(Equal(metadata.Version)) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "https://unsupported" // Unsupported protocol + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("failed to construct Helm client: invalid OCI registry URL: https://unsupported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmChartSpec{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) + + if tt.wantErr != nil { + g.Expect(err).To(HaveOccurred()) + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + 
g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + } + + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } + }) + } +} + +func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + chartsArtifact := &meta.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(st.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &meta.Artifact{ + Revision: "9876abcd", + Path: "values.yaml", + } + g.Expect(st.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: "cached.tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + source meta.Artifact + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Resolves chart dependencies and builds", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchartwithdeps" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchartwithdeps")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.ResolvedDependencies).To(Equal(4)) + g.Expect(build.Path).To(BeARegularFile()) + chart, err := secureloader.LoadFile(build.Path) + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chart.Metadata.Name).To(Equal("helmchartwithdeps")) + g.Expect(chart.Metadata.Version).To(Equal("0.1.0")) + g.Expect(chart.Dependencies()).To(HaveLen(4)) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ReconcileStrategyRevision sets VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ValuesFiles sets Generation as VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 3 + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ValuesFiles = []string{ + filepath.Join(obj.Spec.Chart, "values.yaml"), + filepath.Join(obj.Spec.Chart, "override.yaml"), + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+3")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{ + "testdata/charts/helmchart/values.yaml", + "testdata/charts/helmchart/override.yaml", + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Chart from 
storage cache", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + }, + { + name: "Chart from storage cache with ObservedValuesFiles", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + }, + { + name: "Generation change forces rebuild", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 2 + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedGeneration = 1 + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + 
g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Empty source artifact", + source: meta.Artifact{}, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("no such file or directory")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + { + name: "Invalid artifact type", + source: *yamlArtifact, + want: sreconcile.ResultEmpty, + wantErr: &serror.Generic{Err: errors.New("artifact untar error: requires gzip-compressed body")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.Scheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: st, + Getters: testGetters, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "artifact", + Namespace: "default", + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b) + if err != nil { + t.Log(err) + } + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b) + } + }) + } +} + +func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + build *chart.Build + beforeFunc func(obj *sourcev1.HelmChart) + want 
sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj *sourcev1.HelmChart) + }{ + { + name: "Incomplete build requeues and does not update status", + build: &chart.Build{}, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""), + }, + }, + { + name: "Copying artifact to storage from build makes ArtifactInStorage=True", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Up-to-date chart build does not persist artifact to storage", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{ + Path: "testdata/charts/helmchart-0.1.0.tgz", + } + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + 
t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + }, + { + name: "Restores conditions in case artifact matches current chart build", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + Packaged: true, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.ObservedChartName = "helmchart" + obj.Status.Artifact = &meta.Artifact{ + Revision: "0.1.0", + Path: "testdata/charts/helmchart-0.1.0.tgz", + } + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles with IgnoreMissingValuesFiles after 
creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + obj.Spec.ValuesFiles = []string{"values.yaml", "missing.yaml", "override.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). 
+ Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "reconcile-artifact-", + Generation: 1, + }, + Status: sourcev1.HelmChartStatus{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(ctx, sp, obj, tt.build) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} + +func TestHelmChartReconciler_getSource(t *testing.T) { + mocks := []client.Object{ + &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepository", + Namespace: "foo", + }, + }, + &sourcev1.GitRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.GitRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "foo", + }, + }, + &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Namespace: "foo", + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithStatusSubresource(&sourcev1.HelmChart{}). + WithObjects(mocks...) 
+ + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + tests := []struct { + name string + obj *sourcev1.HelmChart + want sourcev1.Source + wantErr bool + }{ + { + name: "Get HelmRepository source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[0].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[0].GetName(), + Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[0].(sourcev1.Source), + }, + { + name: "Get GitRepository source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[1].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[1].(sourcev1.Source), + }, + { + name: "Get Bucket source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[2].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[2].(sourcev1.Source), + }, + { + name: "Error on client error", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + wantErr: true, + }, + { + name: "Error on unsupported source kind", + obj: &sourcev1.HelmChart{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: "unsupported", + Kind: "Unsupported", + }, + }, + }, + wantErr: true, + }, + } + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := r.getSource(context.TODO(), tt.obj) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(BeNil()) + return + } + + // TODO(stefan): Remove this workaround when the controller-runtime fake client restores TypeMeta + // https://github.com/kubernetes-sigs/controller-runtime/issues/3302 + unstructuredGot, err := runtime.DefaultUnstructuredConverter.ToUnstructured(got) + g.Expect(err).ToNot(HaveOccurred()) + gotName, _, err := unstructured.NestedFieldCopy(unstructuredGot, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + unstructuredWant, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.want) + g.Expect(err).ToNot(HaveOccurred()) + wantName, _, err := unstructured.NestedFieldCopy(unstructuredWant, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(gotName).To(Equal(wantName)) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestHelmChartReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.HelmChartStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { + // 
Helper to build simple helmChartReconcileFunc with result and error. + buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcileFunc { + return func(_ context.Context, _ *patch.SerialPatcher, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmChartReconcileFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, 
"reconciliation in progress"), + }, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmChartReconcileFunc{ + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmChartReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: 
sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmChart{}). + Build(), + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmChartStatus{ + ObservedGeneration: tt.observedGeneration, + }, + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcile(context.TODO(), sp, obj, tt.reconcileFuncs) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func mockChartBuild(name, version, path string, valuesFiles []string) *chart.Build { + var copyP string + if path != "" { + f, err := os.Open(path) + if err == nil { + defer f.Close() + ff, err := os.CreateTemp("", "chart-mock-*.tgz") + if err == nil { + defer ff.Close() + if _, err = io.Copy(ff, f); err == nil { + copyP = ff.Name() + } + } + } + } + return &chart.Build{ + Name: name, + Version: version, + Path: copyP, + ValuesFiles: valuesFiles, + } +} + +func TestHelmChartReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: 
"positive conditions only", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, "ChartPackageError", "some error") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartPackageError", "some error"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: 
[]metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.HelmChart{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmChartKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmchart", + Namespace: "foo", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithObjects(obj). + WithStatusSubresource(&sourcev1.HelmChart{}) + + c := clientBuilder.Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmChartReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_notify(t *testing.T) { + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.HelmChart) + newObjBeforeFunc func(obj *sourcev1.HelmChart) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = 
&meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal ChartPackageSucceeded packaged", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.HelmChart{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &HelmChartReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + build := &chart.Build{ + Name: "foo", + Version: "1.0.0", + Path: "some/path", + Packaged: true, + } + reconciler.notify(ctx, oldObj, newObj, build, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + type secretOptions struct { + username string + password string + } + + tests := []struct { + name string + url string + registryOpts registryOptions + secretOpts secretOptions + secret *corev1.Secret + 
certSecret *corev1.Secret + insecure bool + provider string + providerImg string + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without basic auth", + want: sreconcile.ResultSuccess, + insecure: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTP with basic auth secret", + want: sreconcile.ResultSuccess, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTP registry - basic auth with invalid secret", + want: sreconcile.ResultEmpty, + wantErr: true, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to login to OCI registry"), + }, + }, + { + name: 
"with contextual login provider", + wantErr: true, + insecure: true, + provider: "aws", + providerImg: "oci://123456789000.dkr.ecr.us-east-2.amazonaws.com/test", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to get credential from"), + }, + }, + { + name: "with contextual login provider and secretRef", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + provider: "azure", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTPS With invalid CA cert", + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + withClientCertAuth: true, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{}, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid caFile"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: failed to parse CA certificate"), + }, + }, + { + name: "HTTPS With CA cert only", + want: 
sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "HTTPS With CA cert and client cert auth", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + withClientCertAuth: true, + }, + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "certs-secretref", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmChart{}) + + workspaceDir := t.TempDir() + + server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "testdata/certs/client.pem", "testdata/certs/client-key.pem", "testdata/certs/ca.pem") + g.Expect(err).ToNot(HaveOccurred()) + + repo := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Type: sourcev1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Insecure: tt.insecure, + }, + } + + if tt.provider != "" { + repo.Spec.Provider = tt.provider + } + // If a provider specific image is provided, overwrite existing URL + // set earlier. It'll fail, but it's necessary to set them because + // the login check expects the URLs to be of certain pattern. 
+ if tt.providerImg != "" { + repo.Spec.URL = tt.providerImg + } + + if tt.secretOpts.username != "" && tt.secretOpts.password != "" { + tt.secret.Data[".dockerconfigjson"] = []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`, + server.registryHost, tt.secretOpts.username, tt.secretOpts.password)) + } + + if tt.secret != nil { + repo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.secret.Name, + } + clientBuilder.WithObjects(tt.secret) + } + + if tt.certSecret != nil { + repo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + clientBuilder.WithObjects(tt.certSecret) + } + + clientBuilder.WithObjects(repo) + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmChartSpec{ + Chart: metadata.Name, + Version: metadata.Version, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repo.Name, + }, + Interval: metav1.Duration{Duration: interval}, + }, + } + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + var b chart.Build + defer func() { + if _, err := os.Stat(b.Path); !os.IsNotExist(err) { + err := os.Remove(b.Path) + g.Expect(err).NotTo(HaveOccurred()) + } + }() + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "<name>", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "<version>", metadata.Version) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := 
r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { + tests := []struct { + name string + version string + want sreconcile.Result + wantErr bool + beforeFunc func(obj *sourcev1.HelmChart) + assertConditions []metav1.Condition + revision string + }{ + { + name: "signed image with no identity matching specified should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with correct subject and issuer should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, 
meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with incorrect and correct identity matchers should pass verification", + version: "6.5.1", + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + revision: "6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "signed image with incorrect subject and issuer should not pass verification", + version: "6.5.1", + wantErr: true, + want: sreconcile.ResultEmpty, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no matching signatures: none of the expected identities matched what was in the certificate"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no matching signatures"), + }, + revision: 
"6.5.1@sha256:af589b918022cd8d85a4543312d28170c2e894ccab8484050ff4bdefdde30b4e", + }, + { + name: "unsigned image should not pass verification", + version: "6.1.0", + wantErr: true, + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + revision: "6.1.0@sha256:642383f56ccb529e3f658d40312d01b58d9bc6caeef653da43e58d1afe88982a", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/charts", + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + }, + } + clientBuilder.WithObjects(repository) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: testStorage, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + Version: tt.version, + Chart: "podinfo", + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + }, + } + chartUrl := fmt.Sprintf("%s/%s:%s", repository.Spec.URL, obj.Spec.Chart, obj.Spec.Version) + + assertConditions := tt.assertConditions + for k := 
range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.Chart) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.Version) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + var b chart.Build + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + 
g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + sg, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}}, + TrustStores: []string{"ca:valid-trust-store"}, + TrustedIdentities: []string{"*"}, + }, + }, + } + + tests := []struct { + name string + shouldSign bool + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + wantErrMsg string + addMultipleCerts bool + provideNoCert bool + provideNoPolicy bool + assertConditions []metav1.Condition + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "unsigned charts should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "chart verification error: failed to verify : no signature", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signature"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signature"), + }, + }, + { + name: "signed charts 
should pass verification", + shouldSign: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "multiple certs should still pass verification", + addMultipleCerts: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = 
metadata.Version + obj.Spec.Verify = nil + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled '' chart with version ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "no cert provided should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + wantErr: true, + provideNoCert: true, + // no namespace but the namespace name should appear before the /notation-config + wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + }, + }, + { + name: "empty string should fail verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = 
metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + provideNoPolicy: true, + wantErr: true, + wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + + policy, err := json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + + data := map[string][]byte{} + + if tt.addMultipleCerts { + data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw + data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw + data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw + } + + if !tt.provideNoCert { + data["notation.crt"] = certTuple.Cert.Raw + } + + if !tt.provideNoPolicy { + data["trustpolicy.json"] = policy + 
} + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "notation-config", + }, + Data: data, + } + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-trust-store", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + clientBuilder.WithObjects(repository, secret, caSecret) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + + chartUrl := fmt.Sprintf("oci://%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.shouldSign { + artifact := fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + remoteRepo, err := oras.NewRepository(artifact) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo.PlainHTTP = true + + repo := nr.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifact, + } + + _, err = notation.Sign(ctx, sg, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + } + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + assertConditions[k].Message = 
strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", chartUrl) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + pf := func(b 
bool) ([]byte, error) { + return []byte("cosign-password"), nil + } + + keys, err := cosign.GenerateKeyPair(pf) + g.Expect(err).ToNot(HaveOccurred()) + + err = os.WriteFile(path.Join(tmpDir, "cosign.key"), keys.PrivateBytes, 0600) + g.Expect(err).ToNot(HaveOccurred()) + + defer func() { + err := os.Remove(path.Join(tmpDir, "cosign.key")) + g.Expect(err).ToNot(HaveOccurred()) + }() + + tests := []struct { + name string + shouldSign bool + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + wantErrMsg string + assertConditions []metav1.Condition + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "unsigned charts should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "chart verification error: failed to verify : no signatures found", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + }, + { + name: "unsigned charts should not pass keyless verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), + }, + }, + { + name: "signed charts should pass verification", + shouldSign: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = nil + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled '' chart with version ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + 
g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cosign-key", + }, + Data: map[string][]byte{ + "cosign.pub": keys.PublicBytes, + }} + + clientBuilder.WithObjects(repository, secret) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + + chartUrl := fmt.Sprintf("oci://%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.shouldSign { + ko := coptions.KeyOpts{ + KeyRef: path.Join(tmpDir, "cosign.key"), + PassFunc: pf, + } + + ro := &coptions.RootOptions{ + Timeout: timeout, + } + + err = sign.SignCmd(ro, ko, coptions.SignOptions{ + Upload: true, + SkipConfirmation: true, + TlogUpload: false, + Registry: coptions.RegistryOptions{Keychain: oci.Anonymous{}, AllowHTTPRegistry: true}, + }, + []string{fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version)}) + g.Expect(err).ToNot(HaveOccurred()) + } 
+ + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", chartUrl) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +// extractChartMeta is used to extract a chart metadata from a byte array +func extractChartMeta(chartData []byte) (*hchart.Metadata, error) { + ch, err := loader.LoadArchive(bytes.NewReader(chartData)) + if err != nil { + return nil, err + } + return ch.Metadata, nil +} + +func loadTestChartToOCI(chartData []byte, server *registryClientTestServer, certFile, keyFile, cafile string) (*hchart.Metadata, error) { + // Login to the registry + err := server.registryClient.Login(server.registryHost, + helmreg.LoginOptBasicAuth(testRegistryUsername, testRegistryPassword), + helmreg.LoginOptTLSClientConfig(certFile, keyFile, cafile)) + if err != nil { + return nil, fmt.Errorf("failed to login to OCI registry: %w", err) + } + metadata, err := extractChartMeta(chartData) + if err != nil { + return nil, 
fmt.Errorf("failed to extract chart metadata: %w", err) + } + + // Upload the test chart + ref := fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + _, err = server.registryClient.Push(chartData, ref) + if err != nil { + return nil, fmt.Errorf("failed to push chart: %w", err) + } + + return metadata, nil +} diff --git a/internal/controller/helmrepository_controller.go b/internal/controller/helmrepository_controller.go new file mode 100644 index 000000000..06c4494cf --- /dev/null +++ b/internal/controller/helmrepository_controller.go @@ -0,0 +1,730 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "github.com/docker/go-units" + "github.com/opencontainers/go-digest" + helmgetter "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/cache" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/helm/getter" + "github.com/fluxcd/source-controller/internal/helm/repository" + intpredicates "github.com/fluxcd/source-controller/internal/predicates" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +// 
helmRepositoryReadyCondition contains the information required to summarize a +// v1.HelmRepository Ready Condition. +var helmRepositoryReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// helmRepositoryFailConditions contains the conditions that represent a +// failure. +var helmRepositoryFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.StorageOperationFailedCondition, +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// HelmRepositoryReconciler reconciles a v1.HelmRepository object. 
type HelmRepositoryReconciler struct {
	client.Client
	kuberecorder.EventRecorder
	helper.Metrics

	// Getters provides the Helm getter implementations used to fetch
	// repository indexes.
	Getters helmgetter.Providers
	// Storage is the artifact storage the fetched index is archived to.
	Storage *storage.Storage
	// ControllerName is used as the field owner for patches and in patch
	// options.
	ControllerName string

	// Cache optionally holds recently fetched repository indexes; TTL is
	// the expiration applied to cached entries.
	Cache *cache.Cache
	TTL   time.Duration
	*cache.CacheRecorder

	// patchOptions are initialized in SetupWithManagerAndOptions.
	patchOptions []patch.Option
}

// HelmRepositoryReconcilerOptions contains options for the
// HelmRepositoryReconciler.
type HelmRepositoryReconcilerOptions struct {
	RateLimiter workqueue.TypedRateLimiter[reconcile.Request]
}

// helmRepositoryReconcileFunc is the function type for all the
// v1.HelmRepository (sub)reconcile functions. The type implementations
// are grouped and executed serially to perform the complete reconcile of the
// object.
type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)

// SetupWithManager sets up the controller with the Manager using default
// options.
func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
}

// SetupWithManagerAndOptions sets up the controller with the Manager and the
// given options, and initializes the patch options for the owned conditions.
func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
	r.patchOptions = getPatchOptions(helmRepositoryReadyCondition.Owned, r.ControllerName)

	return ctrl.NewControllerManagedBy(mgr).
		For(&sourcev1.HelmRepository{}).
		WithEventFilter(
			predicate.And(
				// Only reconcile objects that require OCI migration, a
				// generation change, or an explicit reconcile request.
				intpredicates.HelmRepositoryOCIMigrationPredicate{},
				predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
			),
		).
		WithOptions(controller.Options{
			RateLimiter: opts.RateLimiter,
		}).
		Complete(r)
}

// Reconcile is the main entry point for a v1.HelmRepository reconciliation.
// It fetches the object, migrates legacy OCI-typed objects to static, and
// otherwise runs the sub-reconcilers; the final status patch is always
// applied by the deferred summarize-and-patch block.
func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
	start := time.Now()
	log := ctrl.LoggerFrom(ctx)

	// Fetch the HelmRepository
	obj := &sourcev1.HelmRepository{}
	if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Initialize the patch helper with the current version of the object.
	serialPatcher := patch.NewSerialPatcher(obj, r.Client)

	// If it's of type OCI, migrate the object to static.
	if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
		return r.migrationToStatic(ctx, serialPatcher, obj)
	}

	// recResult stores the abstracted reconcile result.
	var recResult sreconcile.Result

	// Always attempt to patch the object after each reconciliation.
	// NOTE: The final runtime result and error are set in this block.
	defer func() {
		summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher)
		summarizeOpts := []summarize.Option{
			summarize.WithConditions(helmRepositoryReadyCondition),
			summarize.WithReconcileResult(recResult),
			summarize.WithReconcileError(retErr),
			summarize.WithIgnoreNotFound(),
			summarize.WithProcessors(
				summarize.ErrorActionHandler,
				summarize.RecordReconcileReq,
			),
			summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
				// Jitter the requeue interval to avoid thundering herds.
				RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
			}),
			summarize.WithPatchFieldOwner(r.ControllerName),
		}
		result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)

		// Always record duration metrics.
		r.Metrics.RecordDuration(ctx, obj, start)
	}()

	// Examine if the object is under deletion.
	if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
		recResult, retErr = r.reconcileDelete(ctx, obj)
		return
	}

	// Add finalizer first if not exist to avoid the race condition
	// between init and delete.
	// Note: Finalizers in general can only be added when the deletionTimestamp
	// is not set.
	if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
		controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
		recResult = sreconcile.ResultRequeue
		return
	}

	// Return if the object is suspended.
	if obj.Spec.Suspend {
		log.Info("reconciliation is suspended for this object")
		recResult, retErr = sreconcile.ResultEmpty, nil
		return
	}

	// Reconcile actual object
	reconcilers := []helmRepositoryReconcileFunc{
		r.reconcileStorage,
		r.reconcileSource,
		r.reconcileArtifact,
	}
	recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers)
	return
}

// reconcile iterates through the helmRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
	oldObj := obj.DeepCopy()

	rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")

	var reconcileAtVal string
	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
		reconcileAtVal = v
	}

	// Persist reconciling if generation differs or reconciliation is requested.
	switch {
	case obj.Generation != obj.Status.ObservedGeneration:
		rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
			"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest():
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	}

	// Shared state passed by pointer through every sub-reconciler.
	var chartRepo repository.ChartRepository
	var artifact meta.Artifact

	// Run the sub-reconcilers and build the result of reconciliation.
	var res sreconcile.Result
	var resErr error
	for _, rec := range reconcilers {
		recResult, err := rec(ctx, sp, obj, &artifact, &chartRepo)
		// Exit immediately on ResultRequeue.
		if recResult == sreconcile.ResultRequeue {
			return sreconcile.ResultRequeue, nil
		}
		// If an error is received, prioritize the returned results because an
		// error also means immediate requeue.
		if err != nil {
			resErr = err
			res = recResult
			break
		}
		// Prioritize requeue request in the result for successful results.
		res = sreconcile.LowestRequeuingResult(res, recResult)
	}

	r.notify(ctx, oldObj, obj, &chartRepo, res, resErr)

	return res, resErr
}

// notify emits notification related to the reconciliation.
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) {
	// Notify successful reconciliation for new artifact and recovery from any
	// failure.
	if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
		annotations := map[string]string{
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey):   newObj.Status.Artifact.Digest,
		}

		humanReadableSize := "unknown size"
		if size := newObj.Status.Artifact.Size; size != nil {
			humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size)))
		}

		message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL)

		// Notify on new artifact and failure recovery.
		if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
			r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
				"NewArtifact", message)
			ctrl.LoggerFrom(ctx).Info(message)
		} else {
			if sreconcile.FailureRecovery(oldObj, newObj, helmRepositoryFailConditions) {
				r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
					meta.SucceededReason, message)
				ctrl.LoggerFrom(ctx).Info(message)
			}
		}
	}
}

// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
// The garbage collection is executed based on the flag configured settings and
// may remove files that are beyond their TTL or the maximum number of files
// to survive a collection cycle.
// If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of current runtime.
+func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.HelmRepository, _ *meta.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) + conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} 
+ +// reconcileSource attempts to fetch the Helm repository index using the +// specified configuration on the v1.HelmRepository object. +// +// When the fetch fails, it records v1.FetchFailedCondition=True and +// returns early. +// If successful and the index is valid, any previous +// v1.FetchFailedCondition is removed, and the repository.ChartRepository +// pointer is set to the newly fetched index. +func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Ensure it's not an OCI URL. API validation ensures that only + // http/https/oci scheme are allowed. + if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) { + err := fmt.Errorf("'oci' URL scheme cannot be used with 'default' HelmRepository type") + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + normalizedURL, err := repository.NormalizeURL(obj.Spec.URL) + if err != nil { + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + clientOpts, _, err := getter.GetClientOpts(ctx, r.Client, obj, normalizedURL) + if err != nil { + if errors.Is(err, getter.ErrDeprecatedTLSConfig) { + ctrl.LoggerFrom(ctx). 
+ Info("warning: specifying TLS authentication data via `.spec.secretRef` is deprecated, please use `.spec.certSecretRef` instead") + } else { + e := serror.NewGeneric( + err, + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Construct Helm chart repository with options and download index + newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts.TlsConfig, clientOpts.GetterOpts...) + if err != nil { + switch err.(type) { + case *url.Error: + e := serror.NewStalling( + fmt.Errorf("invalid Helm repository URL: %w", err), + sourcev1.URLInvalidReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + default: + e := serror.NewStalling( + fmt.Errorf("failed to construct Helm client: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Fetch the repository index from remote. + if err := newChartRepo.CacheIndex(); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to fetch Helm repository index: %w", err), + meta.FailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + // Coin flip on transient or persistent error, return error and hope for the best + return sreconcile.ResultEmpty, e + } + *chartRepo = *newChartRepo + + // Early comparison to current Artifact. + if curArtifact := obj.GetArtifact(); curArtifact != nil { + curRev := digest.Digest(curArtifact.Revision) + if curRev.Validate() == nil { + // Short-circuit based on the fetched index being an exact match to the + // stored Artifact. 
+ if newRev := chartRepo.Digest(curRev.Algorithm()); newRev.Validate() == nil && (newRev == curRev) { + *artifact = *curArtifact + conditions.Delete(obj, sourcev1.FetchFailedCondition) + return sreconcile.ResultSuccess, nil + } + } + } + + // Load the cached repository index to ensure it passes validation. + if err := chartRepo.LoadFromPath(); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to load Helm repository from index YAML: %w", err), + sourcev1.IndexationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Delete any stale failure observation + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Calculate revision. + revision := chartRepo.Digest(intdigest.Canonical) + if revision.Validate() != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to calculate revision: %w", err), + sourcev1.IndexationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Mark observations about the revision on the object. + message := fmt.Sprintf("new index revision '%s'", revision) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + + // Create potential new artifact. + *artifact = r.Storage.NewArtifactFor(obj.Kind, + obj.ObjectMeta.GetObjectMeta(), + revision.String(), + fmt.Sprintf("index-%s.yaml", revision.Encoded()), + ) + + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact archives a new Artifact to the Storage, if the current +// (Status) data on the object does not match the given. 
+// +// The inspection of the given data to the object is differed, ensuring any +// stale observations like v1.ArtifactOutdatedCondition are removed. +// If the given Artifact does not differ from the object's current, it returns +// early. +// On a successful archive, the Artifact in the Status of the object is set, +// and the symlink in the Storage is updated to its path. +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Set the ArtifactInStorageCondition if there's no drift. + defer func() { + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, + "stored artifact: revision '%s'", artifact.Revision) + } + if err := chartRepo.Clear(); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file") + } + }() + + if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasDigest(artifact.Digest) { + // Extend TTL of the Index in the cache (if present). + if r.Cache != nil { + r.Cache.SetExpiration(artifact.Path, r.TTL) + } + + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil + } + + // Create artifact dir + if err := r.Storage.MkdirAll(*artifact); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Acquire lock. 
+ unlock, err := r.Storage.Lock(*artifact) + if err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + meta.FailedReason, + ) + } + defer unlock() + + // Save artifact to storage in JSON format. + b, err := chartRepo.ToJSON() + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to get JSON index from chart repo: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + if err = r.Storage.Copy(artifact, bytes.NewBuffer(b)); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to save artifact to storage: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Record it on the object. + obj.Status.Artifact = artifact.DeepCopy() + + // Cache the index if it was successfully retrieved. + if r.Cache != nil && chartRepo.Index != nil { + // The cache keys have to be safe in multi-tenancy environments, as + // otherwise it could be used as a vector to bypass the repository's + // authentication. Using the Artifact.Path is safe as the path is in + // the format of: ///. + if err := r.Cache.Set(artifact.Path, chartRepo.Index, r.TTL); err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err) + } + } + + // Update index symlink. 
+ indexURL, err := r.Storage.Symlink(*artifact, "index.yaml") + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + "failed to update status URL symlink: %s", err) + } + if indexURL != "" { + obj.Status.URL = indexURL + } + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. +// Removing the finalizer from the object if successful. +func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list if we are deleting the object + if !obj.DeletionTimestamp.IsZero() { + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + } + + // Delete cache metrics. + if r.CacheRecorder != nil && r.Metrics.IsDelete(obj) { + r.DeleteCacheEvent(cache.CacheEventTypeHit, obj.Name, obj.Namespace) + r.DeleteCacheEvent(cache.CacheEventTypeMiss, obj.Name, obj.Namespace) + } + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. +// +// It removes all but the current Artifact from the Storage, unless: +// - the deletion timestamp on the object is set +// - the obj.Spec.Type has changed and artifacts are not supported by the new type +// Which will result in the removal of all Artifacts for the objects. 
+func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { + if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + // Clean status sub-resource + obj.Status.Artifact = nil + obj.Status.URL = "" + // Remove any stale conditions. + obj.Status.Conditions = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// migrateToStatic is HelmRepository OCI migration to static object. 
+func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository) (result ctrl.Result, err error) { + // Skip migration if suspended and not being deleted. + if obj.Spec.Suspend && obj.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + if !intpredicates.HelmRepositoryOCIRequireMigration(obj) { + // Already migrated, nothing to do. + return ctrl.Result{}, nil + } + + // Delete any artifact. + _, err = r.reconcileDelete(ctx, obj) + if err != nil { + return ctrl.Result{}, err + } + // Delete finalizer and reset the status. + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + obj.Status = sourcev1.HelmRepositoryStatus{} + + if err := sp.Patch(ctx, obj); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} diff --git a/internal/controller/helmrepository_controller_test.go b/internal/controller/helmrepository_controller_test.go new file mode 100644 index 000000000..d76c58a42 --- /dev/null +++ b/internal/controller/helmrepository_controller_test.go @@ -0,0 +1,1919 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + . 
"github.com/onsi/gomega" + "github.com/opencontainers/go-digest" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/repo" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/secrets" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/cache" + "github.com/fluxcd/source-controller/internal/helm/repository" + intpredicates "github.com/fluxcd/source-controller/internal/predicates" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" +) + +func TestHelmRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "helmrepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + helmrepo := 
&sourcev1.HelmRepository{} + helmrepo.Name = "test-helmrepo" + helmrepo.Namespace = namespaceName + helmrepo.Spec = sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "https://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + helmrepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, helmrepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, helmrepo)).NotTo(HaveOccurred()) + + r := &HelmRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(helmrepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + origObj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + obj := origObj.DeepCopy() + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check 
if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) +} + +func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository, storage *storage.Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *meta.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + revisions := []string{"a", "b", "c", "d"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil { + return err + } + if n != len(revisions)-1 { + time.Sleep(time.Second * 1) + } + } + storage.SetArtifactURL(obj.Status.Artifact) + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/d.txt", + Revision: "d", + Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", + URL: testStorage.Hostname + "/reconcile-storage/d.txt", + Size: int64p(int64(len("d"))), + }, + assertPaths: []string{ + "/reconcile-storage/d.txt", + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "build artifact first time", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building 
artifact"), + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + storage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices empty artifact digest", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + f := "empty-digest.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/empty-digest.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "notices artifact digest mismatch", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + f := "digest-mismatch.txt" + + obj.Status.Artifact = &meta.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), + Revision: "fake", + } + + if err := 
storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil { + return err + } + + // Overwrite with a different digest + obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023" + + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/digest-mismatch.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := storage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil { + return err + } + conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &meta.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + Size: int64p(int64(len("file"))), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: 1, + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var chartRepo repository.ChartRepository + var artifact meta.Artifact + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileStorage(context.TODO(), sp, obj, &artifact, &chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := []struct { + name string + protocol string + server options + url string + secret *corev1.Secret + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + revFunc func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTPS with certSecretRef non-matching CA succeeds via system CA pool", + protocol: "http", + url: "https://stefanprodan.github.io/podinfo", + want: sreconcile.ResultSuccess, + wantErr: false, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + }, + { + name: "HTTPS with certSecretRef makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = 
&meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTPS with secretRef and caFile key makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + 
t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + // Regression test for: https://github.com/fluxcd/source-controller/issues/1218 + name: "HTTPS with docker config secretRef and caFile key makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + Namespace: "default", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := 
secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + // Regression test for: https://github.com/fluxcd/source-controller/issues/1218 + name: "HTTP with docker config secretRef sets Reconciling=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + t.Expect(artifact.Revision).ToNot(BeEmpty()) + }, + }, + { + name: "HTTPS with invalid CAFile in certSecretRef makes FetchFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + Namespace: "default", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to construct Helm client's TLS config: failed to parse CA certificate"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Invalid URL makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Unsupported scheme makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Missing secret returns FetchFailed=True and returns error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Malformed secret returns FetchFailed=True and returns error", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "malformed-basic-auth", + Namespace: "default", + }, + Data: map[string][]byte{ + "username": []byte("git"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secret 'default/malformed-basic-auth': malformed basic auth - has 'username' but missing 'password'"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + // No repo index due to fetch fail. 
+ t.Expect(chartRepo.Path).To(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + t.Expect(artifact.Revision).To(BeEmpty()) + }, + }, + { + name: "Stored index with same revision", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "foo", "bar") + }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).To(BeNil()) + + t.Expect(&artifact).To(BeEquivalentTo(obj.Status.Artifact)) + }, + want: sreconcile.ResultSuccess, + }, + { + name: "Stored index with different revision", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ + Revision: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, "foo", "bar") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { + t.Expect(chartRepo.Path).ToNot(BeEmpty()) + t.Expect(chartRepo.Index).ToNot(BeNil()) + + t.Expect(artifact.Path).To(Not(BeEmpty())) + t.Expect(artifact.Revision).ToNot(Equal(obj.Status.Artifact.Revision)) + }, + want: sreconcile.ResultSuccess, + }, + { + name: "Existing artifact makes ArtifactOutdated=True", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ + Path: "some-path", + Revision: "some-rev", + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), + }, + }, + } + + for _, tt := range tests { + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + + 
g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) + } + + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + server.Start() + defer server.Stop() + obj.Spec.URL = server.URL() + if tt.url != "" { + obj.Spec.URL = tt.url + } + case "https": + g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + defer server.Stop() + obj.Spec.URL = server.URL() + if tt.url != "" { + obj.Spec.URL = tt.url + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.HelmRepository{}) + + if secret != nil { + clientBuilder.WithObjects(secret.DeepCopy()) + } + + var rev digest.Digest + if tt.revFunc != nil { + rev = tt.revFunc(g, server, secret) + } + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + Storage: testStorage, + Getters: testGetters, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj) + } + + // Special handling for tests that need to set revision after calculation + if tt.name == "Stored index with same revision" && rev != "" { + obj.Status.Artifact = &meta.Artifact{ + Revision: rev.String(), + } + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + var chartRepo repository.ChartRepository + var artifact meta.Artifact + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(context.TODO(), sp, obj, &artifact, &chartRepo) + defer os.Remove(chartRepo.Path) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, artifact, &chartRepo) + } + + // In-progress status condition validity. 
+ checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + cache *cache.Cache + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes ArtifactInStorage=True and artifact is stored as JSON", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + b, err := os.ReadFile(localPath) + t.Expect(err).To(Not(HaveOccurred())) + t.Expect(json.Valid(b)).To(BeTrue()) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Archiving (loaded) artifact to storage adds to cache", + cache: cache.New(10, time.Minute), + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + index.Index = &repo.IndexFile{ + APIVersion: "v1", + Generated: time.Now(), + } + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { + i, ok := cache.Get(obj.GetArtifact().Path) + t.Expect(ok).To(BeTrue()) + t.Expect(i).To(BeAssignableToTypeOf(&repo.IndexFile{})) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, 
meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact: revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact: revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). + Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + Cache: tt.cache, + TTL: 1 * time.Minute, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Timeout: &metav1.Duration{Duration: timeout}, + URL: "https://example.com/index.yaml", + }, + } + + chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) + g.Expect(err).ToNot(HaveOccurred()) + chartRepo.Index = &repo.IndexFile{} + + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + // Digest of the index file calculated by the ChartRepository. + artifact.Digest = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, chartRepo) + } + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileArtifact(context.TODO(), sp, obj, &artifact, chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. Check artifacts only on successful + // reconcile. 
+ if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tt.cache) + } + }) + } +} + +func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmRepositoryReconcileFunc with result and error. + buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc { + return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmRepositoryReconcileFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "processing object: new generation 2 -> 3"), + }, + }, + { + name: "failed reconciliation", 
+ reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmRepositoryReconcileFunc{ + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmRepositoryReconcileFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "reconciliation in progress"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + Client: fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.HelmRepository{}). 
+ Build(), + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmRepositoryStatus{ + ObservedGeneration: tt.observedGeneration, + }, + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + ctx := context.TODO() + sp := patch.NewSerialPatcher(obj, r.Client) + + gotRes, gotErr := r.reconcile(ctx, sp, obj, tt.reconcileFuncs) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + g.Expect(gotRes).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository) + assertConditions []metav1.Condition + wantErr bool + }{ + { + name: "positive conditions only", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: false, + }, + { + name: "multiple failures", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") + }, + assertConditions: 
[]metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"), + }, + wantErr: true, + }, + { + name: "mixed positive and negative conditions", + beforeFunc: func(obj *sourcev1.HelmRepository) { + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepo", + Namespace: "foo", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithObjects(obj). 
+ WithStatusSubresource(&sourcev1.HelmRepository{}) + + c := clientBuilder.Build() + + serialPatcher := patch.NewSerialPatcher(obj, c) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmRepositoryReadyCondition), + summarize.WithReconcileResult(sreconcile.ResultSuccess), + summarize.WithIgnoreNotFound(), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}), + summarize.WithPatchFieldOwner("source-controller"), + } + _, err := summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + g.Expect(err != nil).To(Equal(tt.wantErr)) + + key := client.ObjectKeyFromObject(obj) + g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred()) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmRepositoryReconciler_notify(t *testing.T) { + var aSize int64 = 30000 + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.HelmRepository) + newObjBeforeFunc func(obj *sourcev1.HelmRepository) + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact with nil size", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} + }, + wantEvent: "Normal NewArtifact stored fetched index of unknown size", + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + }, + wantEvent: "Normal NewArtifact stored fetched index of size", + }, + { + name: "recovery from 
failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored fetched index of size", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored fetched index of size", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := 
NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.HelmRepository{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &HelmRepositoryReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), + } + chartRepo := repository.ChartRepository{ + URL: "some-address", + } + reconciler.notify(ctx, oldObj, newObj, &chartRepo, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != 
nil {
+ return false
+ }
+ if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
+ return false
+ }
+ readyCondition := conditions.Get(obj, meta.ReadyCondition)
+ return readyCondition.Status == metav1.ConditionTrue &&
+ obj.Generation == readyCondition.ObservedGeneration &&
+ obj.Generation == obj.Status.ObservedGeneration
+ }, timeout).Should(BeTrue())
+
+ // Check if the object status is valid.
+ condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
+ checker := conditionscheck.NewChecker(testEnv.Client, condns)
+ checker.WithT(g).CheckErr(ctx, obj)
+
+ // kstatus client conformance check.
+ u, err := patch.ToUnstructured(obj)
+ g.Expect(err).ToNot(HaveOccurred())
+ res, err := kstatus.Compute(u)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
+
+ // Switch to an OCI helm repository type
+ obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI
+ obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost)
+
+ oldGen := obj.GetGeneration()
+ g.Expect(testEnv.Update(ctx, obj)).To(Succeed())
+ newGen := oldGen + 1
+
+ // Wait for HelmRepository to become static for new generation.
+ g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return newGen == obj.Generation && + !intpredicates.HelmRepositoryOCIRequireMigration(obj) + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing.T) { + g := NewWithT(t) + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + 
// Check if the object status is valid. + condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Change spec Interval to validate spec update + obj.Spec.Interval = metav1.Duration{Duration: interval + time.Second} + oldGen := obj.GetGeneration() + g.Expect(testEnv.Update(ctx, obj)).To(Succeed()) + newGen := oldGen + 1 + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + newGen == readyCondition.ObservedGeneration && + newGen == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. 
+ condns = &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity} + checker = conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) { + g := NewWithT(t) + testCache.Clear() + + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) + + g.Expect(testServer.PackageChartWithVersion("testdata/charts/helmchart", "0.1.0")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) + + testServer.Start() + defer testServer.Stop() + + ns, err := testEnv.CreateNamespace(ctx, "helmrepository") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + helmRepo := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: testServer.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, helmRepo)).To(Succeed()) + + key := client.ObjectKey{Name: helmRepo.Name, Namespace: helmRepo.Namespace} + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return false + } + return len(helmRepo.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return false + } + if !conditions.IsReady(helmRepo) || helmRepo.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(helmRepo, meta.ReadyCondition) + return helmRepo.Generation == 
readyCondition.ObservedGeneration && + helmRepo.Generation == helmRepo.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + err = testEnv.Get(ctx, key, helmRepo) + g.Expect(err).ToNot(HaveOccurred()) + _, cacheHit := testCache.Get(helmRepo.GetArtifact().Path) + g.Expect(cacheHit).To(BeTrue()) + + g.Expect(testEnv.Delete(ctx, helmRepo)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, helmRepo); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_ociMigration(t *testing.T) { + g := NewWithT(t) + + testns, err := testEnv.CreateNamespace(ctx, "hr-oci-migration-test") + g.Expect(err).ToNot(HaveOccurred()) + + t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, testns)).ToNot(HaveOccurred()) + }) + + hr := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("hr-%s", randStringRunes(5)), + Namespace: testns.Name, + }, + } + hrKey := client.ObjectKeyFromObject(hr) + + // Migrates newly created object with finalizer. + + hr.ObjectMeta.Finalizers = append(hr.ObjectMeta.Finalizers, "foo.bar", sourcev1.SourceFinalizer) + hr.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + URL: "oci://foo/bar", + Interval: metav1.Duration{Duration: interval}, + } + g.Expect(testEnv.Create(ctx, hr)).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + _ = testEnv.Get(ctx, hrKey, hr) + return !intpredicates.HelmRepositoryOCIRequireMigration(hr) + }, timeout, time.Second).Should(BeTrue()) + + // Migrates updated object with finalizer. 
+ + patchHelper, err := patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + hr.ObjectMeta.Finalizers = append(hr.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) + hr.Spec.URL = "oci://foo/baz" + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + _ = testEnv.Get(ctx, hrKey, hr) + return !intpredicates.HelmRepositoryOCIRequireMigration(hr) + }, timeout, time.Second).Should(BeTrue()) + + // Migrates deleted object with finalizer. + + patchHelper, err = patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + + // Suspend the object to prevent finalizer from getting removed. + // Ensure only flux finalizer is set to allow the object to be garbage + // collected at the end. + // NOTE: Suspending and updating finalizers are done separately here as + // doing them in a single patch results in flaky test where the finalizer + // update doesn't gets registered with the kube-apiserver, resulting in + // timeout waiting for finalizer to appear on the object below. + hr.Spec.Suspend = true + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + _ = k8sClient.Get(ctx, hrKey, hr) + return hr.Spec.Suspend == true + }, timeout).Should(BeTrue()) + + patchHelper, err = patch.NewHelper(hr, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + + // Add finalizer and verify that finalizer exists on the object using a live + // client. + hr.ObjectMeta.Finalizers = []string{sourcev1.SourceFinalizer} + g.Expect(patchHelper.Patch(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + _ = k8sClient.Get(ctx, hrKey, hr) + return controllerutil.ContainsFinalizer(hr, sourcev1.SourceFinalizer) + }, timeout).Should(BeTrue()) + + // Delete the object and verify. 
+ g.Expect(testEnv.Delete(ctx, hr)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, hrKey, hr); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} diff --git a/internal/controller/ocirepository_controller.go b/internal/controller/ocirepository_controller.go new file mode 100644 index 000000000..a91c8a51b --- /dev/null +++ b/internal/controller/ocirepository_controller.go @@ -0,0 +1,1383 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + cryptotls "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/google/go-containerregistry/pkg/name" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/sigstore/cosign/v2/pkg/cosign" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/oci" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/predicates" + rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" + "github.com/fluxcd/pkg/sourceignore" + "github.com/fluxcd/pkg/tar" + "github.com/fluxcd/pkg/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" +) + +// ociRepositoryReadyCondition contains the information required to summarize a +// v1.OCIRepository Ready Condition. +var ociRepositoryReadyCondition = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + sourcev1.ArtifactInStorageCondition, + sourcev1.SourceVerifiedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.StorageOperationFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + +// ociRepositoryFailConditions contains the conditions that represent a failure. 
+var ociRepositoryFailConditions = []string{ + sourcev1.FetchFailedCondition, + sourcev1.StorageOperationFailedCondition, +} + +type filterFunc func(tags []string) ([]string, error) + +type invalidOCIURLError struct { + err error +} + +func (e invalidOCIURLError) Error() string { + return e.err.Error() +} + +// ociRepositoryReconcileFunc is the function type for all the v1.OCIRepository +// (sub)reconcile functions. The type implementations are grouped and +// executed serially to perform the complete reconcile of the object. +type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) + +// OCIRepositoryReconciler reconciles a v1.OCIRepository object +type OCIRepositoryReconciler struct { + client.Client + helper.Metrics + kuberecorder.EventRecorder + + Storage *storage.Storage + ControllerName string + TokenCache *cache.TokenCache + requeueDependency time.Duration + + patchOptions []patch.Option +} + +type OCIRepositoryReconcilerOptions struct { + DependencyRequeueInterval time.Duration + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OCIRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(mgr, OCIRepositoryReconcilerOptions{}) +} + +func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts OCIRepositoryReconcilerOptions) error { + r.patchOptions = getPatchOptions(ociRepositoryReadyCondition.Owned, r.ControllerName) + + r.requeueDependency = opts.DependencyRequeueInterval + + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.OCIRepository{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), + )). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create + +func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + start := time.Now() + log := ctrl.LoggerFrom(ctx) + + // Fetch the OCIRepository + obj := &sourcev1.OCIRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the patch helper with the current version of the object. + serialPatcher := patch.NewSerialPatcher(obj, r.Client) + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. 
+ defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(ociRepositoryReadyCondition), + summarize.WithBiPolarityConditionTypes(sourcev1.SourceVerifiedCondition), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.ErrorActionHandler, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ + RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()), + }), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record duration metrics. + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Examine if the object is under deletion. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Add finalizer first if not exist to avoid the race condition between init + // and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp + // is not set. + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Return if the object is suspended. + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") + recResult, retErr = sreconcile.ResultEmpty, nil + return + } + + // Reconcile actual object + reconcilers := []ociRepositoryReconcileFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) + return +} + +// reconcile iterates through the ociRepositoryReconcileFunc tasks for the +// object. It returns early on the first call that returns +// reconcile.ResultRequeue, or produces an error. 
// reconcile iterates through the ociRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) {
	// Keep a pre-reconcile copy so notify() can compare old and new state.
	oldObj := obj.DeepCopy()

	rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")

	var reconcileAtVal string
	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
		reconcileAtVal = v
	}

	// Persist reconciling status if generation differs or reconciliation is
	// requested.
	switch {
	case obj.Generation != obj.Status.ObservedGeneration:
		rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
			"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest():
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	}

	// Create temp working dir
	tmpDir, err := util.TempDirForObj("", obj)
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create temporary working directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	defer func() {
		if err = os.RemoveAll(tmpDir); err != nil {
			ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
		}
	}()
	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)

	var (
		res      sreconcile.Result
		resErr   error
		metadata = meta.Artifact{}
	)

	// Run the sub-reconcilers and build the result of reconciliation.
	for _, rec := range reconcilers {
		recResult, err := rec(ctx, sp, obj, &metadata, tmpDir)
		// Exit immediately on ResultRequeue.
		if recResult == sreconcile.ResultRequeue {
			return sreconcile.ResultRequeue, nil
		}
		// If an error is received, prioritize the returned results because an
		// error also means immediate requeue.
		if err != nil {
			resErr = err
			res = recResult
			break
		}
		// Prioritize requeue request in the result.
		res = sreconcile.LowestRequeuingResult(res, recResult)
	}

	r.notify(ctx, oldObj, obj, res, resErr)

	return res, resErr
}

// reconcileSource fetches the upstream OCI artifact metadata and content.
// If this fails, it records v1.FetchFailedCondition=True on the object and returns early.
func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) {
	var authenticator authn.Authenticator

	ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
	defer cancel()

	// Remove previously failed source verification status conditions. The
	// failing verification should be recalculated. But an existing successful
	// verification need not be removed as it indicates verification of previous
	// version.
	if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) {
		conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
	}

	// Generate the registry credential keychain either from static credentials or using cloud OIDC
	keychain, err := r.keychain(ctx, obj)
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to get credential: %w", err),
			sourcev1.AuthenticationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	var proxyURL *url.URL
	if obj.Spec.ProxySecretRef != nil {
		var err error
		proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, types.NamespacedName{
			Name:      obj.Spec.ProxySecretRef.Name,
			Namespace: obj.GetNamespace(),
		})
		if err != nil {
			e := serror.NewGeneric(
				fmt.Errorf("failed to get proxy address: %w", err),
				sourcev1.AuthenticationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	}

	// Only attempt provider (cloud OIDC) login when the static keychain
	// resolved to anonymous, i.e. no credentials were configured.
	if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider && ok {
		opts := []auth.Option{
			auth.WithClient(r.Client),
			auth.WithServiceAccountNamespace(obj.GetNamespace()),
		}

		if obj.Spec.ServiceAccountName != "" {
			// Check object-level workload identity feature gate.
			if !auth.IsObjectLevelWorkloadIdentityEnabled() {
				const gate = auth.FeatureGateObjectLevelWorkloadIdentity
				const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller"
				err := fmt.Errorf(msgFmt, gate)
				return sreconcile.ResultEmpty, serror.NewStalling(err, meta.FeatureGateDisabledReason)
			}
			// Set ServiceAccountName only if explicitly specified
			opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName))
		}
		if r.TokenCache != nil {
			involvedObject := cache.InvolvedObject{
				Kind:      sourcev1.OCIRepositoryKind,
				Name:      obj.GetName(),
				Namespace: obj.GetNamespace(),
				Operation: cache.OperationReconcile,
			}
			opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject))
		}
		if proxyURL != nil {
			opts = append(opts, auth.WithProxyURL(*proxyURL))
		}
		var authErr error
		authenticator, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider, opts...)
		if authErr != nil {
			e := serror.NewGeneric(
				fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr),
				sourcev1.AuthenticationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	}

	// Generate the transport for remote operations
	transport, err := r.transport(ctx, obj, proxyURL)
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to generate transport for '%s': %w", obj.Spec.URL, err),
			sourcev1.AuthenticationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	opts := makeRemoteOptions(ctx, transport, keychain, authenticator)

	// Determine which artifact revision to pull
	ref, err := r.getArtifactRef(obj, opts)
	if err != nil {
		if _, ok := err.(invalidOCIURLError); ok {
			// An invalid URL cannot recover without a spec change: stall.
			e := serror.NewStalling(
				fmt.Errorf("URL validation failed for '%s': %w", obj.Spec.URL, err),
				sourcev1.URLInvalidReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}

		e := serror.NewGeneric(
			fmt.Errorf("failed to determine the artifact tag for '%s': %w", obj.Spec.URL, err),
			sourcev1.ReadOperationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Get the upstream revision from the artifact digest
	// TODO: getRevision resolves the digest, which may change before image is fetched, so it should probably update ref
	revision, err := r.getRevision(ref, opts)
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to determine artifact digest: %w", err),
			sourcev1.OCIPullFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	metaArtifact := &meta.Artifact{Revision: revision}
	metaArtifact.DeepCopyInto(metadata)

	// Mark observations about the revision on the object
	defer func() {
		if !obj.GetArtifact().HasRevision(revision) {
			message := fmt.Sprintf("new revision '%s' for '%s'", revision, ref)
			if obj.GetArtifact() != nil {
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message)
			}
			rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
			if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
				ctrl.LoggerFrom(ctx).Error(err, "failed to patch")
				return
			}
		}
	}()

	// Verify artifact if:
	// - the upstream digest differs from the one in storage (revision drift)
	// - the OCIRepository spec has changed (generation drift)
	// - the previous reconciliation resulted in a failed artifact verification (retry with exponential backoff)
	if obj.Spec.Verify == nil {
		// Remove old observations if verification was disabled
		conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
	} else if !obj.GetArtifact().HasRevision(revision) ||
		conditions.GetObservedGeneration(obj, sourcev1.SourceVerifiedCondition) != obj.Generation ||
		conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) {

		result, err := r.verifySignature(ctx, obj, ref, keychain, authenticator, transport, opts...)
		if err != nil {
			provider := obj.Spec.Verify.Provider
			if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" {
				provider = fmt.Sprintf("%s keyless", provider)
			}
			e := serror.NewGeneric(
				fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err),
				sourcev1.VerificationError,
			)
			conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}

		if result == soci.VerificationResultSuccess {
			conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision %s", revision)
		}
	}

	// Skip pulling if the artifact revision and the source configuration have
	// not changed.
	if obj.GetArtifact().HasRevision(revision) && !ociContentConfigChanged(obj) {
		conditions.Delete(obj, sourcev1.FetchFailedCondition)
		return sreconcile.ResultSuccess, nil
	}

	// Pull artifact from the remote container registry
	img, err := remote.Image(ref, opts...)
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err),
			sourcev1.OCIPullFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Copy the OCI annotations to the internal artifact metadata
	manifest, err := img.Manifest()
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to parse artifact manifest: %w", err),
			sourcev1.OCILayerOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	metadata.Metadata = manifest.Annotations

	// Extract the compressed content from the selected layer
	blob, err := r.selectLayer(obj, img)
	if err != nil {
		e := serror.NewGeneric(err, sourcev1.OCILayerOperationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	// Persist layer content to storage using the specified operation
	switch obj.GetLayerOperation() {
	case sourcev1.OCILayerExtract:
		if err = tar.Untar(blob, dir, tar.WithMaxUntarSize(-1), tar.WithSkipSymlinks()); err != nil {
			e := serror.NewGeneric(
				fmt.Errorf("failed to extract layer contents from artifact: %w", err),
				sourcev1.OCILayerOperationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	case sourcev1.OCILayerCopy:
		metadata.Path = fmt.Sprintf("%s.tgz", r.digestFromRevision(metadata.Revision))
		file, err := os.Create(filepath.Join(dir, metadata.Path))
		if err != nil {
			e := serror.NewGeneric(
				fmt.Errorf("failed to create file to copy layer to: %w", err),
				sourcev1.OCILayerOperationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
		defer file.Close()

		_, err = io.Copy(file, blob)
		if err != nil {
			e := serror.NewGeneric(
				fmt.Errorf("failed to copy layer from artifact: %w", err),
				sourcev1.OCILayerOperationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	default:
		e := serror.NewGeneric(
			fmt.Errorf("unsupported layer operation: %s", obj.GetLayerOperation()),
			sourcev1.OCILayerOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	conditions.Delete(obj, sourcev1.FetchFailedCondition)
	return sreconcile.ResultSuccess, nil
}

// selectLayer finds the matching layer and returns its compressed contents.
// If no layer selector was provided, we pick the first layer from the OCI artifact.
func (r *OCIRepositoryReconciler) selectLayer(obj *sourcev1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) {
	layers, err := image.Layers()
	if err != nil {
		return nil, fmt.Errorf("failed to parse artifact layers: %w", err)
	}

	if len(layers) < 1 {
		return nil, fmt.Errorf("no layers found in artifact")
	}

	var layer gcrv1.Layer
	switch {
	case obj.GetLayerMediaType() != "":
		// A media-type selector is set: scan layers for the first match.
		var found bool
		for i, l := range layers {
			md, err := l.MediaType()
			if err != nil {
				return nil, fmt.Errorf("failed to determine the media type of layer[%v] from artifact: %w", i, err)
			}
			if string(md) == obj.GetLayerMediaType() {
				layer = layers[i]
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("failed to find layer with media type '%s' in artifact", obj.GetLayerMediaType())
		}
	default:
		layer = layers[0]
	}

	// NOTE(review): this error message says "first layer" even when a
	// media-type-selected layer was used — consider rewording.
	blob, err := layer.Compressed()
	if err != nil {
		return nil, fmt.Errorf("failed to extract the first layer from artifact: %w", err)
	}

	return blob, nil
}

// getRevision fetches the upstream digest, returning the revision in the
// format '<tag>@<digest>' (or the bare digest for digest references).
+func (r *OCIRepositoryReconciler) getRevision(ref name.Reference, options []remote.Option) (string, error) { + switch ref := ref.(type) { + case name.Digest: + digest, err := gcrv1.NewHash(ref.DigestStr()) + if err != nil { + return "", err + } + return digest.String(), nil + case name.Tag: + var digest gcrv1.Hash + + desc, err := remote.Head(ref, options...) + if err == nil { + digest = desc.Digest + } else { + rdesc, err := remote.Get(ref, options...) + if err != nil { + return "", err + } + digest = rdesc.Descriptor.Digest + } + return fmt.Sprintf("%s@%s", ref.TagStr(), digest.String()), nil + default: + return "", fmt.Errorf("unsupported reference type: %T", ref) + } +} + +// digestFromRevision extracts the digest from the revision string. +func (r *OCIRepositoryReconciler) digestFromRevision(revision string) string { + parts := strings.Split(revision, "@") + return parts[len(parts)-1] +} + +// verifySignature verifies the authenticity of the given image reference URL. +// It supports two different verification providers: cosign and notation. +// First, it tries to use a key if a Secret with a valid public key is provided. +// If not, when using cosign it falls back to a keyless approach for verification. +// When notation is used, a trust policy is required to verify the image. +// The verification result is returned as a VerificationResult and any error encountered. 
+func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.OCIRepository, + ref name.Reference, keychain authn.Keychain, auth authn.Authenticator, + transport *http.Transport, opt ...remote.Option) (soci.VerificationResult, error) { + + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + + provider := obj.Spec.Verify.Provider + switch provider { + case "cosign": + defaultCosignOciOpts := []scosign.Options{ + scosign.WithRemoteOptions(opt...), + } + + // get the public keys from the given secret + if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil { + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err + } + + signatureVerified := soci.VerificationResultFailed + for k, data := range pubSecret.Data { + // search for public keys in the secret + if strings.HasSuffix(k, ".pub") { + verifier, err := scosign.NewCosignVerifier(ctxTimeout, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...) 
+ if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil || result == soci.VerificationResultFailed { + continue + } + + if result == soci.VerificationResultSuccess { + signatureVerified = result + break + } + } + } + + if signatureVerified == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return soci.VerificationResultSuccess, nil + } + + // if no secret is provided, try keyless verification + ctrl.LoggerFrom(ctx).Info("no secret reference is provided, trying to verify the image using keyless method") + + var identities []cosign.Identity + for _, match := range obj.Spec.Verify.MatchOIDCIdentity { + identities = append(identities, cosign.Identity{ + IssuerRegExp: match.Issuer, + SubjectRegExp: match.Subject, + }) + } + defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities)) + + verifier, err := scosign.NewCosignVerifier(ctxTimeout, defaultCosignOciOpts...) 
+ if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil { + return soci.VerificationResultFailed, err + } + + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return soci.VerificationResultSuccess, nil + + case "notation": + // get the public keys from the given secret + secretRef := obj.Spec.Verify.SecretRef + + if secretRef == nil { + return soci.VerificationResultFailed, fmt.Errorf("verification secret cannot be empty: '%s'", ref) + } + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err + } + + data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey] + if !ok { + return soci.VerificationResultFailed, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String()) + } + + var doc trustpolicy.Document + + if err := json.Unmarshal(data, &doc); err != nil { + return soci.VerificationResultFailed, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err) + } + + var certs [][]byte + + for k, data := range pubSecret.Data { + if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") { + certs = append(certs, data) + } + } + + if certs == nil { + return soci.VerificationResultFailed, fmt.Errorf("no certificates found in secret '%s'", verifySecret.String()) + } + + trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx)) + defaultNotationOciOpts := []notation.Options{ + notation.WithTrustPolicy(trustPolicy), + notation.WithRemoteOptions(opt...), + notation.WithAuth(auth), + notation.WithKeychain(keychain), + notation.WithInsecureRegistry(obj.Spec.Insecure), + notation.WithLogger(ctrl.LoggerFrom(ctx)), + 
notation.WithRootCertificates(certs), + notation.WithTransport(transport), + } + + verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...) + if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil { + return result, err + } + + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return result, nil + default: + return soci.VerificationResultFailed, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider) + } +} + +// retrieveSecret retrieves a secret from the specified namespace with the given secret name. +// It returns the retrieved secret and any error encountered during the retrieval process. +func (r *OCIRepositoryReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) { + var pubSecret corev1.Secret + + if err := r.Get(ctx, verifySecret, &pubSecret); err != nil { + return corev1.Secret{}, err + } + return pubSecret, nil +} + +// parseRepository validates and extracts the repository URL. +func (r *OCIRepositoryReconciler) parseRepository(obj *sourcev1.OCIRepository) (name.Repository, error) { + if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) { + return name.Repository{}, fmt.Errorf("URL must be in format 'oci:////'") + } + + url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) + + options := []name.Option{} + if obj.Spec.Insecure { + options = append(options, name.Insecure) + } + repo, err := name.NewRepository(url, options...) 
+ if err != nil { + return name.Repository{}, err + } + + imageName := strings.TrimPrefix(url, repo.RegistryStr()) + if s := strings.Split(imageName, ":"); len(s) > 1 { + return name.Repository{}, fmt.Errorf("URL must not contain a tag; remove ':%s'", s[1]) + } + + return repo, nil +} + +// getArtifactRef determines which tag or revision should be used and returns the OCI artifact FQN. +func (r *OCIRepositoryReconciler) getArtifactRef(obj *sourcev1.OCIRepository, options []remote.Option) (name.Reference, error) { + repo, err := r.parseRepository(obj) + if err != nil { + return nil, invalidOCIURLError{err} + } + + if obj.Spec.Reference != nil { + if obj.Spec.Reference.Digest != "" { + return repo.Digest(obj.Spec.Reference.Digest), nil + } + + if obj.Spec.Reference.SemVer != "" { + return r.getTagBySemver(repo, obj.Spec.Reference.SemVer, filterTags(obj.Spec.Reference.SemverFilter), options) + } + + if obj.Spec.Reference.Tag != "" { + return repo.Tag(obj.Spec.Reference.Tag), nil + } + } + + return repo.Tag(name.DefaultTag), nil +} + +// getTagBySemver call the remote container registry, fetches all the tags from the repository, +// and returns the latest tag according to the semver expression. +func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp string, filter filterFunc, options []remote.Option) (name.Reference, error) { + tags, err := remote.List(repo, options...) 
+ if err != nil { + return nil, err + } + + validTags, err := filter(tags) + if err != nil { + return nil, err + } + + constraint, err := semver.NewConstraint(exp) + if err != nil { + return nil, fmt.Errorf("semver '%s' parse error: %w", exp, err) + } + + var matchingVersions []*semver.Version + for _, t := range validTags { + v, err := version.ParseVersion(t) + if err != nil { + continue + } + + if constraint.Check(v) { + matchingVersions = append(matchingVersions, v) + } + } + + if len(matchingVersions) == 0 { + return nil, fmt.Errorf("no match found for semver: %s", exp) + } + + sort.Sort(sort.Reverse(semver.Collection(matchingVersions))) + return repo.Tag(matchingVersions[0].Original()), nil +} + +// keychain generates the credential keychain based on the resource +// configuration. If no auth is specified a default keychain with +// anonymous access is returned +func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) { + var imagePullSecrets []corev1.Secret + + // lookup auth secret + if obj.Spec.SecretRef != nil { + var imagePullSecret corev1.Secret + secretRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.SecretRef.Name} + err := r.Get(ctx, secretRef, &imagePullSecret) + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason, + "auth secret '%s' not found", obj.Spec.SecretRef.Name) + return nil, fmt.Errorf("failed to get secret '%s': %w", secretRef, err) + } + imagePullSecrets = append(imagePullSecrets, imagePullSecret) + } + + // lookup service account + if obj.Spec.ServiceAccountName != "" { + saRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.ServiceAccountName} + saSecrets, err := secrets.PullSecretsFromServiceAccountRef(ctx, r.Client, saRef) + if err != nil { + return nil, err + } + imagePullSecrets = append(imagePullSecrets, saSecrets...) 
+ } + + // if no pullsecrets available return an AnonymousKeychain + if len(imagePullSecrets) == 0 { + return soci.Anonymous{}, nil + } + + return k8schain.NewFromPullSecrets(ctx, imagePullSecrets) +} + +// transport clones the default transport from remote and when a certSecretRef is specified, +// the returned transport will include the TLS client and/or CA certificates. +// If the insecure flag is set, the transport will skip the verification of the server's certificate. +// Additionally, if a proxy is specified, transport will use it. +func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository, proxyURL *url.URL) (*http.Transport, error) { + transport := remote.DefaultTransport.(*http.Transport).Clone() + + tlsConfig, err := r.getTLSConfig(ctx, obj) + if err != nil { + return nil, err + } + if tlsConfig != nil { + transport.TLSClientConfig = tlsConfig + } + + if proxyURL != nil { + transport.Proxy = http.ProxyURL(proxyURL) + } + + return transport, nil +} + +// getTLSConfig gets the TLS configuration for the transport based on the +// specified secret reference in the OCIRepository object, or the insecure flag. +func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *sourcev1.OCIRepository) (*cryptotls.Config, error) { + if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" { + if obj.Spec.Insecure { + // NOTE: This is the only place in Flux where InsecureSkipVerify is allowed. + // This exception is made for OCIRepository to maintain backward compatibility + // with tools like crane that require insecure connections without certificates. + // This only applies when no CertSecretRef is provided AND insecure is explicitly set. + // All other controllers must NOT allow InsecureSkipVerify per our security policy. 
+ return &cryptotls.Config{ + InsecureSkipVerify: true, + }, nil + } + return nil, nil + } + + secretName := types.NamespacedName{ + Namespace: obj.Namespace, + Name: obj.Spec.CertSecretRef.Name, + } + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures source-controller continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + return secrets.TLSConfigFromSecretRef(ctx, r.Client, secretName, tlsOpts...) +} + +// reconcileStorage ensures the current state of the storage matches the +// desired and previously observed state. +// +// The garbage collection is executed based on the flag configured settings and +// may remove files that are beyond their TTL or the maximum number of files +// to survive a collection cycle. +// If the Artifact in the Status of the object disappeared from the Storage, +// it is removed from the object. +// If the object does not have an Artifact in its Status, a Reconciling +// condition is added. +// The hostname of any URL in the Status of the object are updated, to ensure +// they match the Storage server hostname of current runtime. 
+func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.OCIRepository, _ *meta.Artifact, _ string) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + var artifactMissing bool + if artifact := obj.GetArtifact(); artifact != nil { + // Determine if the advertised artifact is still in storage + if !r.Storage.ArtifactExist(*artifact) { + artifactMissing = true + } + + // If the artifact is in storage, verify if the advertised digest still + // matches the actual artifact + if !artifactMissing { + if err := r.Storage.VerifyArtifact(*artifact); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error()) + + if err = r.Storage.Remove(*artifact); err != nil { + return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err) + } + + artifactMissing = true + } + } + + // If the artifact is missing, remove it from the object + if artifactMissing { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + msg := "building artifact" + if artifactMissing { + msg += ": disappeared from storage" + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) + conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) + } + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact archives a new Artifact to the Storage, if the current +// (Status) data on the object 
does not match the given. +// +// The inspection of the given data to the object is differed, ensuring any +// stale observations like v1.ArtifactOutdatedCondition are removed. +// If the given Artifact does not differ from the object's current, it returns +// early. +// On a successful archive, the Artifact in the Status of the object is set, +// and the symlink in the Storage is updated to its path. +func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { + // Create artifact + artifact := r.Storage.NewArtifactFor(obj.Kind, obj, metadata.Revision, + fmt.Sprintf("%s.tar.gz", r.digestFromRevision(metadata.Revision))) + + // Set the ArtifactInStorageCondition if there's no drift. + defer func() { + if obj.GetArtifact().HasRevision(artifact.Revision) && !ociContentConfigChanged(obj) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, + "stored artifact for digest '%s'", artifact.Revision) + } + }() + + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) && !ociContentConfigChanged(obj) { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, + "artifact up-to-date with remote revision: '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil + } + + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to stat source path: %w", err), + sourcev1.StatOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } else if !f.IsDir() { + e := serror.NewGeneric( + fmt.Errorf("source path '%s' is not a directory", dir), + sourcev1.InvalidPathReason, + ) + conditions.MarkTrue(obj, 
sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to create artifact directory: %w", err), + sourcev1.DirCreationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + unlock, err := r.Storage.Lock(artifact) + if err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("failed to acquire lock for artifact: %w", err), + meta.FailedReason, + ) + } + defer unlock() + + switch obj.GetLayerOperation() { + case sourcev1.OCILayerCopy: + if err = r.Storage.CopyFromPath(&artifact, filepath.Join(dir, metadata.Path)); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to copy artifact to storage: %w", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + default: + // Load ignore rules for archiving. + ignoreDomain := strings.Split(dir, string(filepath.Separator)) + ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain) + if err != nil { + return sreconcile.ResultEmpty, serror.NewGeneric( + fmt.Errorf("failed to load source ignore patterns from repository: %w", err), + "SourceIgnoreError", + ) + } + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...) 
+ } + + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + e := serror.NewGeneric( + fmt.Errorf("unable to archive artifact to storage: %s", err), + sourcev1.ArchiveOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + // Record the observations on the object. + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.Artifact.Metadata = metadata.Metadata + obj.Status.ObservedIgnore = obj.Spec.Ignore + obj.Status.ObservedLayerSelector = obj.Spec.LayerSelector + + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, + "failed to update status URL symlink: %s", err) + } + if url != "" { + obj.Status.URL = url + } + conditions.Delete(obj, sourcev1.StorageOperationFailedCondition) + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the deletion of the object. +// It first garbage collects all Artifacts for the object from the Storage. +// Removing the finalizer from the object if successful. +func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.OCIRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given object. 
+// +// It removes all but the current Artifact from the Storage, unless the +// deletion timestamp on the object is set. Which will result in the +// removal of all Artifacts for the objects. +func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection for deleted resource failed: %w", err), + "GarbageCollectionFailed", + ) + } else if deleted != "" { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) + if err != nil { + return serror.NewGeneric( + fmt.Errorf("garbage collection of artifacts failed: %w", err), + "GarbageCollectionFailed", + ) + } + if len(delFiles) > 0 { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected %d artifacts", len(delFiles)) + return nil + } + } + return nil +} + +// eventLogf records events, and logs at the same time. +// +// This log is different from the debug log in the EventRecorder, in the sense +// that this is a simple log. While the debug log contains complete details +// about the event. +func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// notify emits notification related to the reconciliation. 
+func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) { + // Notify successful reconciliation for new artifact and recovery from any + // failure. + if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { + annotations := map[string]string{ + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, + fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, + } + + message := fmt.Sprintf("stored artifact with revision '%s' from '%s'", newObj.Status.Artifact.Revision, newObj.Spec.URL) + + // enrich message with upstream annotations if found + if info := newObj.GetArtifact().Metadata; info != nil { + var source, revision string + if val, ok := info[oci.SourceAnnotation]; ok { + source = val + } + if val, ok := info[oci.RevisionAnnotation]; ok { + revision = val + } + if source != "" && revision != "" { + message = fmt.Sprintf("%s, origin source '%s', origin revision '%s'", message, source, revision) + } + } + + // Notify on new artifact and failure recovery. + if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + "NewArtifact", message) + ctrl.LoggerFrom(ctx).Info(message) + } else { + if sreconcile.FailureRecovery(oldObj, newObj, ociRepositoryFailConditions) { + r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, + meta.SucceededReason, message) + ctrl.LoggerFrom(ctx).Info(message) + } + } + } +} + +// makeRemoteOptions returns a remoteOptions struct with the authentication and transport options set. +// The returned struct can be used to interact with a remote registry using go-containerregistry based libraries. 
+func makeRemoteOptions(ctxTimeout context.Context, transport http.RoundTripper, + keychain authn.Keychain, auth authn.Authenticator) remoteOptions { + + authOption := remote.WithAuthFromKeychain(keychain) + if auth != nil { + // auth take precedence over keychain here as we expect the caller to set + // the auth only if it is required. + authOption = remote.WithAuth(auth) + } + return remoteOptions{ + remote.WithContext(ctxTimeout), + remote.WithUserAgent(oci.UserAgent), + remote.WithTransport(transport), + authOption, + } +} + +// remoteOptions contains the options to interact with a remote registry. +// It can be used to pass options to go-containerregistry based libraries. +type remoteOptions []remote.Option + +// ociContentConfigChanged evaluates the current spec with the observations +// of the artifact in the status to determine if artifact content configuration +// has changed and requires rebuilding the artifact. +func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool { + if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { + return true + } + + if !layerSelectorEqual(obj.Spec.LayerSelector, obj.Status.ObservedLayerSelector) { + return true + } + + return false +} + +// Returns true if both arguments are nil or both arguments +// dereference to the same value. +// Based on k8s.io/utils/pointer/pointer.go pointer value equality. 
func layerSelectorEqual(a, b *sourcev1.OCILayerSelector) bool {
	// Exactly one nil means the selectors differ.
	if (a == nil) != (b == nil) {
		return false
	}
	// Both nil: equal.
	if a == nil {
		return true
	}
	// Both non-nil: compare by value.
	return *a == *b
}

// filterTags returns a filterFunc that keeps only the tags matching the given
// regular expression. An empty filter keeps every tag; an invalid expression
// surfaces as an error when the returned function is invoked.
func filterTags(filter string) filterFunc {
	return func(tags []string) ([]string, error) {
		if filter == "" {
			return tags, nil
		}

		match, err := regexp.Compile(filter)
		if err != nil {
			return nil, err
		}

		validTags := []string{}
		for _, tag := range tags {
			if match.MatchString(tag) {
				validTags = append(validTags, tag)
			}
		}
		return validTags, nil
	}
}
diff --git a/internal/controller/ocirepository_controller_test.go b/internal/controller/ocirepository_controller_test.go
new file mode 100644
index 000000000..6ea35e962
--- /dev/null
+++ b/internal/controller/ocirepository_controller_test.go
@@ -0,0 +1,3709 @@
+/*
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controller + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/crane" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-core-go/signature/cose" + "github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" + "github.com/sigstore/cosign/v2/pkg/cosign" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + oras "oras.land/oras-go/v2/registry/remote" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + 
"github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/oci" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/tar" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + snotation "github.com/fluxcd/source-controller/internal/oci/notation" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +func TestOCIRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { + g := NewWithT(t) + + namespaceName := "ocirepo-" + randStringRunes(5) + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + } + g.Expect(k8sClient.Create(ctx, namespace)).ToNot(HaveOccurred()) + t.Cleanup(func() { + g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) + }) + + ocirepo := &sourcev1.OCIRepository{} + ocirepo.Name = "test-ocirepo" + ocirepo.Namespace = namespaceName + ocirepo.Spec = sourcev1.OCIRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: "oci://example.com", + } + // Add a test finalizer to prevent the object from getting deleted. + ocirepo.SetFinalizers([]string{"test-finalizer"}) + g.Expect(k8sClient.Create(ctx, ocirepo)).NotTo(HaveOccurred()) + // Add deletion timestamp by deleting the object. + g.Expect(k8sClient.Delete(ctx, ocirepo)).NotTo(HaveOccurred()) + + r := &OCIRepositoryReconciler{ + Client: k8sClient, + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + // NOTE: Only a real API server responds with an error in this scenario. 
+ _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(ocirepo)}) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestOCIRepository_Reconcile(t *testing.T) { + g := NewWithT(t) + + // Registry server with public images + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + t.Cleanup(func() { + regServer.Close() + }) + + podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + tests := []struct { + name string + url string + tag string + semver string + revision string + mediaType string + operation string + assertArtifact []artifactFixture + }{ + { + name: "public tag", + url: podinfoVersions["6.1.6"].url, + tag: podinfoVersions["6.1.6"].tag, + revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.6"].tag, podinfoVersions["6.1.6"].digest.String()), + mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + operation: sourcev1.OCILayerCopy, + assertArtifact: []artifactFixture{ + { + expectedPath: "kustomize/deployment.yaml", + expectedChecksum: "6fd625effe6bb805b6a78943ee082a4412e763edb7fcaed6e8fe644d06cbf423", + }, + { + expectedPath: "kustomize/hpa.yaml", + expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", + }, + }, + }, + { + name: "public semver", + url: podinfoVersions["6.1.5"].url, + semver: ">= 6.1 <= 6.1.5", + revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.5"].tag, podinfoVersions["6.1.5"].digest.String()), + assertArtifact: []artifactFixture{ + { + expectedPath: "kustomize/deployment.yaml", + expectedChecksum: "dce4f5f780a8e8994b06031e5b567bf488ceaaaabd9bd3fc278b4f3bfc8c577b", + }, + { + expectedPath: "kustomize/hpa.yaml", + expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-reconcile-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + origObj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{}, + Insecure: true, + }, + } + obj := origObj.DeepCopy() + + if tt.tag != "" { + obj.Spec.Reference.Tag = tt.tag + } + if tt.semver != "" { + obj.Spec.Reference.SemVer = tt.semver + } + if tt.mediaType != "" { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: tt.mediaType} + + if tt.operation != "" { + obj.Spec.LayerSelector.Operation = tt.operation + } + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be Ready + waitForSourceReadyWithArtifact(ctx, g, obj) + + // Check if the revision matches the expected revision + g.Expect(obj.Status.Artifact.Revision).To(Equal(tt.revision)) + + // Check if the metadata matches the expected annotations + g.Expect(obj.Status.Artifact.Metadata[oci.SourceAnnotation]).To(ContainSubstring("podinfo")) + g.Expect(obj.Status.Artifact.Metadata[oci.RevisionAnnotation]).To(ContainSubstring(tt.tag)) + + // Check if the artifact storage path matches the expected file path + localPath := testStorage.LocalPath(*obj.Status.Artifact) + t.Logf("artifact local path: %s", localPath) + + f, err := os.Open(localPath) + g.Expect(err).ToNot(HaveOccurred()) + defer f.Close() + + // create a tmp directory to extract artifact + tmp, err 
:= os.MkdirTemp("", "ocirepository-test-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmp) + + err = tar.Untar(f, tmp, tar.WithMaxUntarSize(-1)) + g.Expect(err).ToNot(HaveOccurred()) + + for _, af := range tt.assertArtifact { + expectedFile := filepath.Join(tmp, af.expectedPath) + g.Expect(expectedFile).To(BeAnExistingFile()) + + f2, err := os.Open(expectedFile) + g.Expect(err).ToNot(HaveOccurred()) + defer f2.Close() + + d, err := intdigest.Canonical.FromReader(f2) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(d.Encoded()).To(Equal(af.expectedChecksum)) + } + + // Check if the object status is valid + condns := &conditionscheck.Conditions{NegativePolarity: ociRepositoryReadyCondition.NegativePolarity} + checker := conditionscheck.NewChecker(testEnv.Client, condns) + checker.WithT(g).CheckErr(ctx, obj) + + // kstatus client conformance check + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. + patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + waitForSourceDeletion(ctx, g, obj) + + // Check if a suspended object gets deleted. 
+ obj = origObj.DeepCopy() + testSuspendedObjectDeleteWithArtifact(ctx, g, obj) + }) + } +} + +func TestOCIRepository_Reconcile_MediaType(t *testing.T) { + g := NewWithT(t) + + // Registry server with public images + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + t.Cleanup(func() { + regServer.Close() + }) + + podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + tests := []struct { + name string + url string + tag string + mediaType string + wantErr bool + }{ + { + name: "Works with no media type", + url: podinfoVersions["6.1.4"].url, + tag: podinfoVersions["6.1.4"].tag, + }, + { + name: "Works with Flux CLI media type", + url: podinfoVersions["6.1.5"].url, + tag: podinfoVersions["6.1.5"].tag, + mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + name: "Fails with unknown media type", + url: podinfoVersions["6.1.6"].url, + tag: podinfoVersions["6.1.6"].tag, + mediaType: "application/invalid.tar.gzip", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-mediatype-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{ + Tag: tt.tag, + }, + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: tt.mediaType, + }, + Insecure: true, + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for the 
finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be reconciled + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition != nil && !conditions.IsUnknown(obj, meta.ReadyCondition) + }, timeout).Should(BeTrue()) + + g.Expect(conditions.IsReady(obj)).To(BeIdenticalTo(!tt.wantErr)) + if tt.wantErr { + g.Expect(conditions.Get(obj, meta.ReadyCondition).Message).Should(ContainSubstring("failed to find layer with media type")) + } + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { + type secretOptions struct { + username string + password string + includeSA bool + includeSecret bool + } + + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + + tests := []struct { + name string + url string + registryOpts registryOptions + craneOpts []crane.Option + secretOpts secretOptions + tlsCertSecret *corev1.Secret + insecure bool + provider string + providerImg string + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without basic auth", + want: sreconcile.ResultSuccess, + craneOpts: []crane.Option{crane.Insecure}, + insecure: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP with 
basic auth secret", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSecret: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP with serviceaccount", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSA: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTP registry - basic auth with missing secret", + want: sreconcile.ResultEmpty, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + wantErr: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), + }, + }, + { + name: "HTTP registry 
- basic auth with invalid secret", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + insecure: true, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + includeSecret: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), + }, + }, + { + name: "HTTP registry - basic auth with invalid serviceaccount", + want: sreconcile.ResultEmpty, + wantErr: true, + insecure: true, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: "wrong-pass", + password: "wrong-pass", + includeSA: true, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), + }, + }, + { + name: "HTTPS with valid certfile", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTPS with valid certfile using deprecated keys", + want: 
sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "HTTPS without certfile", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), + }, + }, + { + name: "HTTPS with invalid certfile", + want: sreconcile.ResultEmpty, + wantErr: true, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": []byte("invalid"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to parse CA certificate"), + }, + }, + { + name: "HTTPS with certfile using both caFile and ca.crt ignores caFile", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withTLS: true, + }, + craneOpts: []crane.Option{crane.WithTransport(&http.Transport{ + TLSClientConfig: 
&tls.Config{ + RootCAs: pool, + }, + }), + }, + tlsCertSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "caFile": []byte("invalid"), + }, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + { + name: "with contextual login provider", + wantErr: true, + provider: "aws", + providerImg: "oci://123456789000.dkr.ecr.us-east-2.amazonaws.com/test", + craneOpts: []crane.Option{ + crane.Insecure, + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to get credential from"), + }, + }, + { + name: "secretRef takes precedence over provider", + want: sreconcile.ResultSuccess, + registryOpts: registryOptions{ + withBasicAuth: true, + }, + craneOpts: []crane.Option{ + crane.WithAuth(&authn.Basic{ + Username: testRegistryUsername, + Password: testRegistryPassword, + }), + crane.Insecure, + }, + secretOpts: secretOptions{ + username: testRegistryUsername, + password: testRegistryPassword, + includeSecret: true, + }, + insecure: true, + provider: "azure", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...) + g.Expect(err).ToNot(HaveOccurred()) + obj.Spec.URL = img.url + obj.Spec.Reference = &sourcev1.OCIRepositoryRef{ + Tag: img.tag, + } + + if tt.provider != "" { + obj.Spec.Provider = tt.provider + } + // If a provider specific image is provided, overwrite existing URL + // set earlier. It'll fail but it's necessary to set them because + // the login check expects the URLs to be of certain pattern. 
+ if tt.providerImg != "" { + obj.Spec.URL = tt.providerImg + } + + if tt.secretOpts.username != "" && tt.secretOpts.password != "" { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-secretref", + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`, + server.registryHost, tt.secretOpts.username, tt.secretOpts.password)), + }, + } + clientBuilder.WithObjects(secret) + + if tt.secretOpts.includeSA { + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sa-ocitest", + }, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: secret.Name}}, + } + clientBuilder.WithObjects(serviceAccount) + obj.Spec.ServiceAccountName = serviceAccount.Name + } + + if tt.secretOpts.includeSecret { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: secret.Name, + } + } + } + + if tt.tlsCertSecret != nil { + clientBuilder.WithObjects(tt.tlsCertSecret) + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.tlsCertSecret.Name, + } + } + if tt.insecure { + obj.Spec.Insecure = true + } + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + opts := makeRemoteOptions(ctx, makeTransport(tt.insecure), authn.DefaultKeychain, nil) + ref, err := r.getArtifactRef(obj, opts) + g.Expect(err).To(BeNil()) + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", img.tag, img.digest.String())) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", ref.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) 
+ }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + tmpDir := t.TempDir() + got, err := r.reconcileSource(ctx, sp, obj, &meta.Artifact{}, tmpDir) + if tt.wantErr { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func makeTransport(insecure bool) http.RoundTripper { + transport := remote.DefaultTransport.(*http.Transport).Clone() + if insecure { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} +func TestOCIRepository_CertSecret(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{ + withTLS: true, + withClientCertAuth: true, + }) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + regServer.Close() + }) + + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + clientTLSCert, err := tls.X509KeyPair(clientPublicKey, clientPrivateKey) + g.Expect(err).ToNot(HaveOccurred()) + + transport := http.DefaultTransport.(*http.Transport) + transport.TLSClientConfig = &tls.Config{ + RootCAs: pool, + Certificates: []tls.Certificate{clientTLSCert}, + } + pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", regServer.registryHost, []crane.Option{ + crane.WithTransport(transport), + }...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + tlsSecretClientCert := corev1.Secret{ + Data: map[string][]byte{ + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": clientPrivateKey, + }, + } + + tests := []struct { + name string + url string + digest gcrv1.Hash + certSecret *corev1.Secret + expectreadyconition bool + expectedstatusmessage string + }{ + { + name: "test connection with CACert, Client Cert and Private Key", + url: pi.url, + digest: pi.digest, + certSecret: &tlsSecretClientCert, + expectreadyconition: true, + expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.String()), + }, + { + name: "test connection with no secret", + url: pi.url, + digest: pi.digest, + expectreadyconition: false, + expectedstatusmessage: "tls: failed to verify certificate: x509:", + }, + { + name: "test connection with with incorrect private key", + url: pi.url, + digest: pi.digest, + certSecret: &corev1.Secret{ + Data: map[string][]byte{ + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": []byte("invalid-key"), + }, + }, + expectreadyconition: false, + expectedstatusmessage: "failed to generate transport for '': failed to parse TLS certificate and key: tls: failed to find any PEM data in key input", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-test-resource", + Namespace: ns.Name, + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, + }, + } + + if tt.certSecret != nil { + tt.certSecret.ObjectMeta = metav1.ObjectMeta{ + GenerateName: "cert-secretref", + Namespace: ns.Name, 
+ } + + g.Expect(testEnv.CreateAndWait(ctx, tt.certSecret)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, tt.certSecret)).To(Succeed()) }() + + obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: tt.certSecret.Name} + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + resultobj := sourcev1.OCIRepository{} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + return len(resultobj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + if readyCondition == nil || conditions.IsUnknown(&resultobj, meta.ReadyCondition) { + return false + } + return obj.Generation == readyCondition.ObservedGeneration && + conditions.IsReady(&resultobj) == tt.expectreadyconition + }, timeout).Should(BeTrue()) + + tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) + + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_ProxySecret(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + regServer.Close() + }) + + pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", regServer.registryHost) + 
g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + url string + digest gcrv1.Hash + proxySecret *corev1.Secret + expectreadyconition bool + expectedstatusmessage string + }{ + { + name: "test proxied connection", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://%s", proxyAddr)), + }, + }, + expectreadyconition: true, + expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.String()), + }, + { + name: "test proxy connection error", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://localhost:%d", proxyPort+1)), + }, + }, + expectreadyconition: false, + expectedstatusmessage: "failed to pull artifact", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-test-resource", + Namespace: ns.Name, + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, + }, + } + + if tt.proxySecret != nil { + tt.proxySecret.ObjectMeta = metav1.ObjectMeta{ + GenerateName: "proxy-secretref", + Namespace: ns.Name, + } + + g.Expect(testEnv.CreateAndWait(ctx, tt.proxySecret)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, tt.proxySecret)).To(Succeed()) }() + + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{Name: tt.proxySecret.Name} + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + 
resultobj := sourcev1.OCIRepository{} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + return len(resultobj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + if readyCondition == nil || conditions.IsUnknown(&resultobj, meta.ReadyCondition) { + return false + } + return obj.Generation == readyCondition.ObservedGeneration && + conditions.IsReady(&resultobj) == tt.expectreadyconition + }, timeout).Should(BeTrue()) + + tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) + + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + img6 := podinfoVersions["6.1.6"] + img5 := podinfoVersions["6.1.5"] + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + want sreconcile.Result + wantErr bool + wantRevision string + assertConditions []metav1.Condition + }{ + { + name: "no reference (latest tag)", + want: 
sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("latest@%s", img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "tag reference", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "semver reference", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.5", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "digest reference", + reference: &sourcev1.OCIRepositoryRef{ + Digest: img6.digest.String(), + }, + wantRevision: img6.digest.String(), + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "invalid tag reference", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.0", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, " MANIFEST_UNKNOWN"), + }, + }, + { + name: "invalid semver reference", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: "<= 6.1.0", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost), + }, + }, + { + name: "invalid digest reference", + reference: &sourcev1.OCIRepositoryRef{ + Digest: "invalid", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to determine artifact digest"), + }, + }, + { + name: "semver should take precedence over tag", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.5", + Tag: "6.1.5", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "digest should take precedence over semver", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + SemVer: ">= 6.1.6", + Digest: img5.digest.String(), + }, + want: sreconcile.ResultSuccess, + wantRevision: img5.digest.String(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). 
+ WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "checkout-strategy-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Insecure: true, + }, + } + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + tmpDir := t.TempDir() + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact.Revision).To(Equal(tt.wantRevision)) + } + + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + insecure bool + want sreconcile.Result + wantErr bool + wantErrMsg string + shouldSign bool + useDigest bool + addMultipleCerts bool + provideNoCert bool + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) + assertConditions []metav1.Condition + }{ + { + name: "signed image should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + shouldSign: true, + want: 
sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + useDigest: true, + wantErrMsg: "failed to verify the signature using provider 'notation': no signature is associated with \"\"", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no signature is associated with \"\", make sure the artifact was signed successfully"), + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Spec.Verify = nil + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + }, + want: sreconcile.ResultSuccess, + }, + { + name: "same artifact, verified before, change in obj gen verify again", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: 
fmt.Sprintf("%s@%s", tag, revision)} + // Set Verified with old observed generation and different reason/message. + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + // Set new object generation. + obj.SetGeneration(3) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no verify for already verified, verified condition remains the same", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + // Artifact present and custom verified condition reason/message. + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Verified", "verified"), + }, + }, + { + name: "signed image on an insecure registry passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "signed image on an insecure registry using digest as reference passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + useDigest: true, + want: sreconcile.ResultSuccess, + 
assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "verification level audit and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + useDigest: true, + want: sreconcile.ResultSuccess, + addMultipleCerts: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no cert provided should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + useDigest: true, + provideNoCert: true, + // no namespace but the namespace name should appear before the /notation-config + wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no certificates found in secret 
'/notation-config"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + signer, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}}, + TrustStores: []string{"ca:valid-trust-store"}, + TrustedIdentities: []string{"*"}, + }, + }, + } + + tmpDir := t.TempDir() + + policy, err := json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-trust-store", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + g.Expect(r.Create(ctx, caSecret)).ToNot(HaveOccurred()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{ + withTLS: !tt.insecure, + }) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: 
fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + data := map[string][]byte{} + + if tt.addMultipleCerts { + data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw + data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw + data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw + } + + if !tt.provideNoCert { + data["notation.crt"] = certTuple.Cert.Raw + } + + data["trustpolicy.json"] = policy + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "notation-config-", + }, + Data: data, + } + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + if tt.insecure { + obj.Spec.Insecure = true + } else { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "valid-trust-store", + } + } + + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()} + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, tt.insecure, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.useDigest { + obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String() + } + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.shouldSign { + remoteRepo, err := oras.NewRepository(artifactRef.String()) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.insecure { + remoteRepo.PlainHTTP = true + } + + repo := registry.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := 
notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifactRef.String(), + } + + _, err = notation.Sign(ctx, signer, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + } + + image := podinfoVersions[tt.reference.Tag] + assertConditions := tt.assertConditions + for k := range assertConditions { + if tt.useDigest { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String()) + } else { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + } + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + signatureVerification trustpolicy.SignatureVerification + trustedIdentities []string + trustStores []string + want 
sreconcile.Result + wantErr bool + wantErrMsg string + useDigest bool + usePolicyJson bool + provideNoPolicy bool + policyJson string + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) + assertConditions []metav1.Condition + }{ + { + name: "verification level audit and incorrect trust identity should fail verification but not error", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, + trustedIdentities: []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "verification level permissive and incorrect trust identity should fail verification and error", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, + trustedIdentities: []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"}, + trustStores: []string{"ca:valid-trust-store"}, + useDigest: true, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\"", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, 
"building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\""), + }, + }, + { + name: "verification level permissive and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, + trustedIdentities: []string{"*"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "verification level audit and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, + trustedIdentities: []string{"*"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + 
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "verification level skip and should not be marked as verified", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelSkip.Name}, + trustedIdentities: []string{}, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + }, + }, + { + name: "valid json but empty policy json should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + usePolicyJson: true, + policyJson: "{}", + wantErr: true, + wantErrMsg: "trust policy document has empty version", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "trust policy document has empty version, version must be specified"), + }, + }, + { + name: "empty string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + usePolicyJson: true, + policyJson: "", + wantErr: true, + wantErrMsg: fmt.Sprintf("error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey), + }, + }, + { + name: "invalid character in string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + usePolicyJson: true, + policyJson: "{\"version\": \"1.0\u000A\", \"trust_policies\": []}", + wantErr: true, + wantErrMsg: fmt.Sprintf("error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey), + }, + }, + { + name: "empty string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + provideNoPolicy: true, + wantErr: true, + wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + signer, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + tmpDir := t.TempDir() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var policy []byte + + if !tt.usePolicyJson { + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: tt.signatureVerification, + TrustStores: tt.trustStores, + TrustedIdentities: tt.trustedIdentities, + }, + }, + } + + policy, err = json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) 
+ } else { + policy = []byte(tt.policyJson) + } + + data := map[string][]byte{} + + if !tt.provideNoPolicy { + data["trustpolicy.json"] = policy + } + + data["notation.crt"] = certTuple.Cert.Raw + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "notation-", + }, + Data: data, + } + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + obj.Spec.Insecure = true + + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()} + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.useDigest { + obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String() + } + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo, err := oras.NewRepository(artifactRef.String()) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo.PlainHTTP = true + + repo := registry.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifactRef.String(), + } + + _, err = notation.Sign(ctx, signer, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + + image := podinfoVersions[tt.reference.Tag] + signatureDigest := "" + + artifactDescriptor, err := repo.Resolve(ctx, image.tag) + g.Expect(err).ToNot(HaveOccurred()) + _ = repo.ListSignatures(ctx, artifactDescriptor, func(signatureManifests []ocispec.Descriptor) error { + g.Expect(len(signatureManifests)).Should(Equal(1)) + signatureDigest = signatureManifests[0].Digest.String() + return nil + }) + + assertConditions := 
tt.assertConditions + for k := range assertConditions { + if tt.useDigest { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String()) + } else { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + } + + if signatureDigest != "" { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", signatureDigest) + } + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred()) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + if signatureDigest != "" { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", signatureDigest) + } + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + insecure bool + want sreconcile.Result + wantErr bool + wantErrMsg string + shouldSign bool + keyless bool + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) + assertConditions 
[]metav1.Condition + }{ + { + name: "signed image should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + shouldSign: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + wantErrMsg: "failed to verify the signature using provider 'cosign': no matching signatures were found for ''", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no matching signatures were found for ''"), + }, + }, + { + name: "unsigned image should not pass keyless verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + keyless: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' 
keyless': no signatures found"), + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Spec.Verify = nil + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + }, + want: sreconcile.ResultSuccess, + }, + { + name: "same artifact, verified before, change in obj gen verify again", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + // Set Verified with old observed generation and different reason/message. + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + // Set new object generation. + obj.SetGeneration(3) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no verify for already verified, verified condition remains the same", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + // Artifact present and custom verified condition reason/message. 
+ obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Verified", "verified"), + }, + }, + { + name: "signed image on an insecure registry passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + pf := func(b bool) ([]byte, error) { + return []byte("cosign-password"), nil + } + + keys, err := cosign.GenerateKeyPair(pf) + g.Expect(err).ToNot(HaveOccurred()) + + tmpDir := t.TempDir() + err = os.WriteFile(path.Join(tmpDir, "cosign.key"), keys.PrivateBytes, 0600) + g.Expect(err).ToNot(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cosign-key", + }, + Data: map[string][]byte{ + "cosign.pub": keys.PublicBytes, + }} + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-cert-cosign", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + g.Expect(r.Create(ctx, caSecret)).ToNot(HaveOccurred()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workspaceDir := t.TempDir() + regOpts := registryOptions{ + withTLS: !tt.insecure, + } + server, err := setupRegistryServer(ctx, workspaceDir, regOpts) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + if tt.insecure { + obj.Spec.Insecure = true + } else { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "ca-cert-cosign", + } + } + + if !tt.keyless { + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: 
"cosign-key"} + } + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, tt.insecure, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.shouldSign { + ko := coptions.KeyOpts{ + KeyRef: path.Join(tmpDir, "cosign.key"), + PassFunc: pf, + } + + ro := &coptions.RootOptions{ + Timeout: timeout, + } + err = sign.SignCmd(ro, ko, coptions.SignOptions{ + Upload: true, + SkipConfirmation: true, + TlogUpload: false, + + Registry: coptions.RegistryOptions{Keychain: keychain, AllowInsecure: true, AllowHTTPRegistry: tt.insecure}, + }, []string{artifactRef.String()}) + + g.Expect(err).ToNot(HaveOccurred()) + } + + image := podinfoVersions[tt.reference.Tag] + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + g.Expect(err).ToNot(BeNil()) + 
g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + want sreconcile.Result + wantErr bool + wantErrMsg string + beforeFunc func(obj *sourcev1.OCIRepository) + assertConditions []metav1.Condition + revision string + }{ + { + name: "signed image with no identity matching specified should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with correct subject and issuer should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' 
for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with both incorrect and correct identity matchers should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + want: sreconcile.ResultSuccess, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + { + + Subject: "^https://github.com/stefanprodan/podinfo.*$", + Issuer: "^https://token.actions.githubusercontent.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "signed image with incorrect subject and issuer should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.5.1", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ + { + Subject: "intruder", + Issuer: "^https://honeypot.com$", + }, + } + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no matching signatures: none of the expected identities matched what was in the certificate"), + }, + revision: "6.5.1@sha256:049fff8f9c92abba8615c6c3dcf9d10d30082213f6fe86c9305257f806c31e31", + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.0", + }, + wantErr: true, + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no signatures found"), + }, + revision: "6.1.0@sha256:3816fe9636a297f0c934b1fa0f46fe4c068920375536ac2803604adfb4c55894", + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "cosign", + }, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Reference: tt.reference, + }, + } + url := strings.TrimPrefix(obj.Spec.URL, "oci://") + ":" + tt.reference.Tag + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", tt.revision) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", url) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "cosign") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, t.TempDir()) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", url) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_noop(t 
*testing.T) {
	g := NewWithT(t)

	testRevision := "6.1.5@sha256:8e4057c22d531d40e12b065443cb0d80394b7257c4dc557cb1fbd4dce892b86d"

	// Local registry seeded with a single podinfo image; torn down with the test.
	tmpDir := t.TempDir()
	server, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
	g.Expect(err).ToNot(HaveOccurred())
	t.Cleanup(func() {
		server.Close()
	})

	_, err = pushMultiplePodinfoImages(server.registryHost, true, "6.1.5")
	g.Expect(err).ToNot(HaveOccurred())

	// NOTE: The following verifies if it was a noop run by checking the
	// artifact metadata which is unknown unless the image is pulled.

	tests := []struct {
		name       string
		beforeFunc func(obj *sourcev1.OCIRepository)                // seeds obj status/spec before reconcile
		afterFunc  func(g *WithT, artifact *meta.Artifact)          // asserts whether the image was actually pulled
	}{
		{
			name: "full reconcile - no existing artifact",
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).ToNot(BeEmpty())
			},
		},
		{
			name: "noop - artifact revisions match",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).To(BeEmpty())
			},
		},
		{
			// ObservedIgnore set without a matching Spec.Ignore forces a rebuild.
			name: "full reconcile - same rev, unobserved ignore",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Status.ObservedIgnore = ptr.To("aaa")
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).ToNot(BeEmpty())
			},
		},
		{
			name: "noop - same rev, observed ignore",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.Ignore = ptr.To("aaa")
				obj.Status.ObservedIgnore = ptr.To("aaa")
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).To(BeEmpty())
			},
		},
		{
			name: "full reconcile - same rev, unobserved layer selector",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
					Operation: sourcev1.OCILayerCopy,
				}
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).ToNot(BeEmpty())
			},
		},
		{
			name: "noop - same rev, observed layer selector",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
					Operation: sourcev1.OCILayerCopy,
				}
				obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
					Operation: sourcev1.OCILayerCopy,
				}
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).To(BeEmpty())
			},
		},
		{
			// Spec operation differs from the observed one -> rebuild.
			name: "full reconcile - same rev, observed layer selector changed",
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
					Operation: sourcev1.OCILayerExtract,
				}
				obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
					Operation: sourcev1.OCILayerCopy,
				}
				obj.Status.Artifact = &meta.Artifact{
					Revision: testRevision,
				}
			},
			afterFunc: func(g *WithT, artifact *meta.Artifact) {
				g.Expect(artifact.Metadata).ToNot(BeEmpty())
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "noop-",
					Generation:   1,
				},
				Spec: sourcev1.OCIRepositorySpec{
					URL:       fmt.Sprintf("oci://%s/podinfo", server.registryHost),
					Reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.5"},
					Interval:  metav1.Duration{Duration: interval},
					Timeout:   &metav1.Duration{Duration: timeout},
					Insecure:  true,
				},
			}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			artifact := &meta.Artifact{}
			tmpDir := t.TempDir()
			got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir)
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(got).To(Equal(sreconcile.ResultSuccess))

			if tt.afterFunc != nil {
				tt.afterFunc(g, artifact)
			}
		})
	}
}

// TestOCIRepository_reconcileArtifact covers archiving of a fetched source
// directory into storage, including source-ignore handling, layer-selector
// observation, and error paths for bad target paths.
func TestOCIRepository_reconcileArtifact(t *testing.T) {
	tests := []struct {
		name             string
		targetPath       string
		artifact         *meta.Artifact
		beforeFunc       func(obj *sourcev1.OCIRepository)
		want             sreconcile.Result
		wantErr          bool
		assertArtifact   *meta.Artifact
		assertPaths      []string
		assertConditions []metav1.Condition
		afterFunc        func(g *WithT, obj *sourcev1.OCIRepository)
	}{
		{
			name:       "Archiving Artifact creates correct files and condition",
			targetPath: "testdata/oci/repository",
			artifact: &meta.Artifact{
				Revision: "revision",
			},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new revision")
			},
			want:
sreconcile.ResultSuccess,
			assertPaths: []string{
				"latest.tar.gz",
			},
			afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) {
				// Digest is stable because the fixture directory content is fixed.
				g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:6a5bd135a816ec0ad246c41cfdd87629e40ef6520001aeb2d0118a703abe9e7a"))
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			// Ignoring foo.txt changes the archive content, hence a different digest.
			name:       "Artifact with source ignore",
			targetPath: "testdata/oci/repository",
			artifact:   &meta.Artifact{Revision: "revision"},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.Ignore = ptr.To("foo.txt")
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"latest.tar.gz",
			},
			afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) {
				g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:9102e9c8626e48821a91a4963436f1673cd85f8fb3deb843c992f85b995c38ea"))
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name: "No status changes if artifact is already present",
			artifact: &meta.Artifact{
				Revision: "revision",
			},
			targetPath: "testdata/oci/repository",
			want:       sreconcile.ResultSuccess,
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Status.Artifact = &meta.Artifact{
					Revision: "revision",
				}
			},
			assertArtifact: &meta.Artifact{
				Revision: "revision",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name:       "Artifact already present, unobserved ignore, rebuild artifact",
			targetPath: "testdata/oci/repository",
			artifact: &meta.Artifact{
				Revision: "revision",
			},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Status.Artifact = &meta.Artifact{Revision: "revision"}
				obj.Spec.Ignore = ptr.To("aaa")
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"latest.tar.gz",
			},
			afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) {
				// A rebuild must record the ignore rule that was applied.
				g.Expect(*obj.Status.ObservedIgnore).To(Equal("aaa"))
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name:       "Artifact already present, unobserved layer selector, rebuild artifact",
			targetPath: "testdata/oci/repository",
			artifact: &meta.Artifact{
				Revision: "revision",
			},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"}
				obj.Status.Artifact = &meta.Artifact{Revision: "revision"}
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"latest.tar.gz",
			},
			afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) {
				g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo"))
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name:       "Artifact already present, observed layer selector changed, rebuild artifact",
			targetPath: "testdata/oci/repository",
			artifact: &meta.Artifact{
				Revision: "revision",
				Path:     "foo.txt",
			},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{
					MediaType: "foo",
					Operation: sourcev1.OCILayerCopy,
				}
				obj.Status.Artifact = &meta.Artifact{Revision: "revision"}
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"latest.tar.gz",
			},
			afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) {
				g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo"))
				g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(sourcev1.OCILayerCopy))
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name:       "Artifact already present, observed ignore and layer selector, up-to-date",
			targetPath: "testdata/oci/repository",
			artifact: &meta.Artifact{
				Revision: "revision",
			},
			beforeFunc: func(obj *sourcev1.OCIRepository) {
				obj.Spec.Ignore = ptr.To("aaa")
				obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"}
				obj.Status.Artifact = &meta.Artifact{Revision: "revision"}
				obj.Status.ObservedIgnore = ptr.To("aaa")
				obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"}
			},
			want: sreconcile.ResultSuccess,
			assertArtifact: &meta.Artifact{
				Revision: "revision",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
			},
		},
		{
			name:       "target path doesn't exist",
			targetPath: "testdata/oci/non-existent",
			want:       sreconcile.ResultEmpty,
			wantErr:    true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path: "),
			},
		},
		{
			name:       "target path is a file",
			targetPath: "testdata/oci/repository/foo.txt",
			want:       sreconcile.ResultEmpty,
			wantErr:    true,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "source path 'testdata/oci/repository/foo.txt' is not a directory"),
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Best effort: fixture dirs may have been chmod'ed by earlier cases.
			_ = resetChmod(tt.targetPath, 0o755, 0o644)

			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "reconcile-artifact-",
					Generation:   1,
				},
			}
			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			artifact := &meta.Artifact{}
			if tt.artifact != nil {
				artifact = tt.artifact
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			got, err := r.reconcileArtifact(ctx, sp, obj, artifact, tt.targetPath)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}

			g.Expect(got).To(Equal(tt.want))
			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))

			if tt.assertArtifact != nil {
				// NOTE(review): compares against tt.artifact, not tt.assertArtifact —
				// looks intentional (they hold the same revision) but worth confirming.
				g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.artifact))
			}

			if tt.afterFunc != nil {
				tt.afterFunc(g, obj)
			}

			for _, p := range tt.assertPaths {
				localPath := testStorage.LocalPath(*obj.GetArtifact())
				p = filepath.Join(filepath.Dir(localPath), p)
				_, err := os.Lstat(p)
				g.Expect(err).ToNot(HaveOccurred())
			}
		})
	}
}

// TestOCIRepository_getArtifactRef verifies resolution of the OCIRepositoryRef
// (tag, digest, semver, semver filter) into a concrete artifact reference.
func TestOCIRepository_getArtifactRef(t *testing.T) {
	g := NewWithT(t)

	tmpDir := t.TempDir()
	server, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
	g.Expect(err).ToNot(HaveOccurred())
	t.Cleanup(func() {
		server.Close()
	})

	imgs, err := pushMultiplePodinfoImages(server.registryHost, true,
		"6.1.4",
		"6.1.5-beta.1",
		"6.1.5-rc.1",
		"6.1.5",
		"6.1.6-rc.1",
		"6.1.6",
	)
	g.Expect(err).ToNot(HaveOccurred())

	tests := []struct {
		name      string
		url       string
		reference *sourcev1.OCIRepositoryRef
		wantErr   bool
		want      string
	}{
		{
			name: "valid url with no reference",
			url:  "oci://ghcr.io/stefanprodan/charts",
			want: "ghcr.io/stefanprodan/charts:latest",
		},
		{
			name: "valid url with tag reference",
			url:  "oci://ghcr.io/stefanprodan/charts",
			reference: &sourcev1.OCIRepositoryRef{
				Tag: "6.1.6",
			},
			want: "ghcr.io/stefanprodan/charts:6.1.6",
		},
		{
			name: "valid url with digest reference",
			url:  "oci://ghcr.io/stefanprodan/charts",
			reference: &sourcev1.OCIRepositoryRef{
				Digest: imgs["6.1.6"].digest.String(),
			},
			want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.String(),
		},
		{
			name: "valid url with semver reference",
			url:  fmt.Sprintf("oci://%s/podinfo", server.registryHost),
			reference: &sourcev1.OCIRepositoryRef{
				SemVer: ">= 6.1.6",
			},
			want: server.registryHost + "/podinfo:6.1.6",
		},
		{
			name:    "invalid url without oci prefix",
			url:     "ghcr.io/stefanprodan/charts",
			wantErr: true,
		},
		{
			name: "valid url with semver filter",
			url:  fmt.Sprintf("oci://%s/podinfo", server.registryHost),
			reference: &sourcev1.OCIRepositoryRef{
				SemVer:       ">= 6.1.x-0",
				SemverFilter: ".*-rc.*",
			},
			want: server.registryHost + "/podinfo:6.1.6-rc.1",
		},
		{
			name: "valid url with semver filter and unexisting version",
			url:  fmt.Sprintf("oci://%s/podinfo", server.registryHost),
			reference: &sourcev1.OCIRepositoryRef{
				SemVer:       ">= 6.1.x-0",
				SemverFilter: ".*-alpha.*",
			},
			wantErr: true,
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "artifact-url-",
				},
				Spec: sourcev1.OCIRepositorySpec{
					URL:      tt.url,
					Interval: metav1.Duration{Duration: interval},
					Timeout:  &metav1.Duration{Duration: timeout},
					Insecure: true,
				},
			}

			if tt.reference != nil {
				obj.Spec.Reference = tt.reference
			}

			opts := makeRemoteOptions(ctx, makeTransport(true), authn.DefaultKeychain, nil)
			got, err := r.getArtifactRef(obj, opts)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(got.String()).To(Equal(tt.want))
		})
	}
}

// TestOCIRepository_invalidURL verifies that a URL without the oci:// scheme
// marks the object Stalled with reason URLInvalid (runs against testEnv).
func TestOCIRepository_invalidURL(t *testing.T) {
	g := NewWithT(t)

	ns, err := testEnv.CreateNamespace(ctx, "ocirepository-invalid-url-test")
	g.Expect(err).ToNot(HaveOccurred())
	defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()

	obj := &sourcev1.OCIRepository{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "ocirepository-reconcile",
			Namespace:    ns.Name,
		},
		Spec: sourcev1.OCIRepositorySpec{
			// NOTE(review): presumably invalid because a tag is appended to the
			// repository URL; the reconciler is expected to reject it.
			URL:      "oci://ghcr.io/test/test:v1",
			Interval: metav1.Duration{Duration: 60 * time.Minute},
		},
	}

	g.Expect(testEnv.Create(ctx, obj)).To(Succeed())

	key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
	resultobj := sourcev1.OCIRepository{}

	// Wait for the object to fail
	g.Eventually(func() bool {
		if err := testEnv.Get(ctx, key, &resultobj); err != nil {
			return false
		}
		readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
		if readyCondition == nil {
			return false
		}
		return obj.Generation == readyCondition.ObservedGeneration &&
			!conditions.IsUnknown(&resultobj, meta.ReadyCondition)
	}, timeout).Should(BeTrue())

	// Verify that stalled condition is present in status
	stalledCondition := conditions.Get(&resultobj, meta.StalledCondition)
	g.Expect(stalledCondition).ToNot(BeNil())
	g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason))
}

// TestOCIRepository_objectLevelWorkloadIdentityFeatureGate verifies that
// spec.serviceAccountName with a cloud provider stalls the object while the
// ObjectLevelWorkloadIdentity feature gate is disabled, and proceeds to an
// (expected) authentication failure once the gate is enabled.
func TestOCIRepository_objectLevelWorkloadIdentityFeatureGate(t *testing.T) {
	g := NewWithT(t)

	ns, err := testEnv.CreateNamespace(ctx, "ocirepository-olwifg-test")
	g.Expect(err).ToNot(HaveOccurred())
	defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()

	err = testEnv.Create(ctx, &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.Name,
			Name:      "test",
		},
	})
	g.Expect(err).NotTo(HaveOccurred())

	obj := &sourcev1.OCIRepository{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "ocirepository-reconcile",
			Namespace:    ns.Name,
		},
		Spec: sourcev1.OCIRepositorySpec{
			URL:                "oci://ghcr.io/stefanprodan/manifests/podinfo",
			Interval:           metav1.Duration{Duration: 60 * time.Minute},
			Provider:           "aws",
			ServiceAccountName: "test",
		},
	}

	g.Expect(testEnv.Create(ctx, obj)).To(Succeed())

	key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
	resultobj := &sourcev1.OCIRepository{}

	g.Eventually(func() bool {
		if err := testEnv.Get(ctx, key, resultobj); err != nil {
			return false
		}
		return conditions.IsStalled(resultobj)
	}).Should(BeTrue())

	stalledCondition := conditions.Get(resultobj, meta.StalledCondition)
	g.Expect(stalledCondition).ToNot(BeNil())
	g.Expect(stalledCondition.Reason).Should(Equal(meta.FeatureGateDisabledReason))
	g.Expect(stalledCondition.Message).Should(Equal("to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller"))

	auth.EnableObjectLevelWorkloadIdentity()
	t.Cleanup(auth.DisableObjectLevelWorkloadIdentity)

	// Re-trigger reconciliation now that the gate is enabled.
	g.Eventually(func() bool {
		if
err := testEnv.Get(ctx, key, resultobj); err != nil {
			return false
		}
		resultobj.Annotations = map[string]string{
			meta.ReconcileRequestAnnotation: time.Now().Format(time.RFC3339),
		}
		return testEnv.Update(ctx, resultobj) == nil
	}).Should(BeTrue())

	// With the gate enabled the object must progress past the gate check and
	// instead fail on (absent) AWS credentials.
	g.Eventually(func() bool {
		if err := testEnv.Get(ctx, key, resultobj); err != nil {
			return false
		}
		logOCIRepoStatus(t, resultobj)
		return !conditions.IsReady(resultobj) &&
			conditions.GetReason(resultobj, meta.ReadyCondition) == sourcev1.AuthenticationFailedReason
	}).Should(BeTrue())
}

// TestOCIRepository_reconcileStorage covers garbage collection, detection of
// missing/corrupt artifacts in storage, and artifact URL re-hosting.
func TestOCIRepository_reconcileStorage(t *testing.T) {
	tests := []struct {
		name             string
		beforeFunc       func(obj *sourcev1.OCIRepository, storage *storage.Storage) error
		want             sreconcile.Result
		wantErr          bool
		assertConditions []metav1.Condition
		assertArtifact   *meta.Artifact
		assertPaths      []string
	}{
		{
			name: "garbage collects",
			beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error {
				revisions := []string{"a", "b", "c", "d"}

				for n := range revisions {
					v := revisions[n]
					obj.Status.Artifact = &meta.Artifact{
						Path:     fmt.Sprintf("/oci-reconcile-storage/%s.txt", v),
						Revision: v,
					}
					if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
						return err
					}

					if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
						return err
					}

					// Spread mtimes so GC ordering by age is deterministic.
					if n != len(revisions)-1 {
						time.Sleep(time.Second)
					}
				}

				storage.SetArtifactURL(obj.Status.Artifact)
				conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar")
				return nil
			},
			assertArtifact: &meta.Artifact{
				Path:     "/oci-reconcile-storage/d.txt",
				Revision: "d",
				Digest:   "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
				URL:      testStorage.Hostname + "/oci-reconcile-storage/d.txt",
				Size:     int64p(int64(len("d"))),
			},
			assertPaths: []string{
				// Leading "!" marks paths expected to have been garbage collected.
				"/oci-reconcile-storage/d.txt",
				"/oci-reconcile-storage/c.txt",
				"!/oci-reconcile-storage/b.txt",
				"!/oci-reconcile-storage/a.txt",
			},
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"),
			},
		},
		{
			name: "build artifact first time",
			want: sreconcile.ResultSuccess,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact"),
			},
		},
		{
			name: "notices missing artifact in storage",
			beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error {
				obj.Status.Artifact = &meta.Artifact{
					Path:     "/oci-reconcile-storage/invalid.txt",
					Revision: "e",
				}
				storage.SetArtifactURL(obj.Status.Artifact)
				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/oci-reconcile-storage/invalid.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "notices empty artifact digest",
			beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error {
				f := "empty-digest.txt"

				obj.Status.Artifact = &meta.Artifact{
					Path:     fmt.Sprintf("/oci-reconcile-storage/%s.txt", f),
					Revision: "fake",
				}

				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil {
					return err
				}

				// Clear the digest to simulate a missing checksum record.
				obj.Status.Artifact.Digest = ""

				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/oci-reconcile-storage/empty-digest.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "notices artifact digest mismatch",
			beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error {
				f := "digest-mismatch.txt"

				obj.Status.Artifact = &meta.Artifact{
					Path:     fmt.Sprintf("/oci-reconcile-storage/%s.txt", f),
					Revision: "fake",
				}

				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(f), 0o600); err != nil {
					return err
				}

				// Overwrite with a different digest
				obj.Status.Artifact.Digest = "sha256:6c329d5322473f904e2f908a51c12efa0ca8aa4201dd84f2c9d203a6ab3e9023"

				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"!/oci-reconcile-storage/digest-mismatch.txt",
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
				*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: disappeared from storage"),
			},
		},
		{
			name: "updates hostname on diff from current",
			beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error {
				obj.Status.Artifact = &meta.Artifact{
					Path:     "/oci-reconcile-storage/hostname.txt",
					Revision: "f",
					Digest:   "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
					URL:      "http://outdated.com/oci-reconcile-storage/hostname.txt",
				}
				if err := storage.MkdirAll(*obj.Status.Artifact); err != nil {
					return err
				}
				if err := storage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
					return err
				}
				conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar")
				return nil
			},
			want: sreconcile.ResultSuccess,
			assertPaths: []string{
				"/oci-reconcile-storage/hostname.txt",
			},
			assertArtifact: &meta.Artifact{
				Path:     "/oci-reconcile-storage/hostname.txt",
				Revision: "f",
				Digest:   "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
				URL:      testStorage.Hostname + "/oci-reconcile-storage/hostname.txt",
				Size:     int64p(int64(len("file"))),
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, "foo", "bar"),
			},
		},
	}

	clientBuilder := fakeclient.NewClientBuilder().
		WithScheme(testEnv.GetScheme()).
		WithStatusSubresource(&sourcev1.OCIRepository{})

	r := &OCIRepositoryReconciler{
		Client:        clientBuilder.Build(),
		EventRecorder: record.NewFakeRecorder(32),
		Storage:       testStorage,
		patchOptions:  getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.OCIRepository{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "test-",
					Generation:   1,
				},
			}

			if tt.beforeFunc != nil {
				g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
			}

			g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred())
			defer func() {
				g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred())
			}()

			sp := patch.NewSerialPatcher(obj, r.Client)

			got, err := r.reconcileStorage(ctx, sp, obj, &meta.Artifact{}, "")
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}

			g.Expect(got).To(Equal(tt.want))
			g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
			if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
				g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
			}

			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))

			for _, p := range tt.assertPaths {
				absoluteP := filepath.Join(testStorage.BasePath, p)
				if !strings.HasPrefix(p, "!") {
					g.Expect(absoluteP).To(BeAnExistingFile())
continue + } + + g.Expect(absoluteP).ToNot(BeAnExistingFile()) + } + + // In-progress status condition validity. + checker := conditionscheck.NewInProgressChecker(r.Client) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +func TestOCIRepository_ReconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &OCIRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.OCIRepositoryStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestOCIRepositoryReconciler_notify(t *testing.T) { + + noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason") + noopErr.Ignore = true + + tests := []struct { + name string + res sreconcile.Result + resErr error + oldObjBeforeFunc func(obj *sourcev1.OCIRepository) + newObjBeforeFunc func(obj *sourcev1.OCIRepository) + commit git.Commit + wantEvent string + }{ + { + name: "error - no event", + res: sreconcile.ResultEmpty, + resErr: errors.New("some error"), + }, + { + name: "new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{ + Revision: "xxx", + Digest: "yyy", + Metadata: map[string]string{ + oci.SourceAnnotation: 
"https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: "6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872", + }, + } + }, + wantEvent: "Normal NewArtifact stored artifact with revision 'xxx' from 'oci://newurl.io', origin source 'https://github.com/stefanprodan/podinfo', origin revision '6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872'", + }, + { + name: "recovery from failure", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal Succeeded stored artifact with revision 'xxx' from 'oci://newurl.io'", + }, + { + name: "recovery and new artifact", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.URL = "oci://newurl.io" + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + wantEvent: "Normal NewArtifact stored artifact with revision 'aaa' from 'oci://newurl.io'", + }, + { + name: "no updates", + res: sreconcile.ResultSuccess, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + 
obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") + }, + }, + { + name: "no updates on requeue", + res: sreconcile.ResultRequeue, + resErr: nil, + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + recorder := record.NewFakeRecorder(32) + + oldObj := &sourcev1.OCIRepository{} + newObj := oldObj.DeepCopy() + + if tt.oldObjBeforeFunc != nil { + tt.oldObjBeforeFunc(oldObj) + } + if tt.newObjBeforeFunc != nil { + tt.newObjBeforeFunc(newObj) + } + + reconciler := &OCIRepositoryReconciler{ + EventRecorder: recorder, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + reconciler.notify(ctx, oldObj, newObj, tt.res, tt.resErr) + + select { + case x, ok := <-recorder.Events: + g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received") + if tt.wantEvent != "" { + g.Expect(x).To(ContainSubstring(tt.wantEvent)) + } + default: + if tt.wantEvent != "" { + t.Errorf("expected some event to be emitted") + } + } + }) + } +} + +type artifactFixture struct { + expectedPath string + expectedChecksum string +} + +type podinfoImage struct { + url string + tag string + digest gcrv1.Hash +} + +func createPodinfoImageFromTar(tarFileName, tag, registryURL string, opts ...crane.Option) (*podinfoImage, error) { + // Create Image + image, err := crane.Load(path.Join("testdata", "podinfo", tarFileName)) + if err != nil { + return nil, err + } + + image = 
setPodinfoImageAnnotations(image, tag) + + // url.Parse doesn't handle urls with no scheme well e.g localhost: + if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { + registryURL = fmt.Sprintf("http://%s", registryURL) + } + + myURL, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) + + // Image digest + podinfoImageDigest, err := image.Digest() + if err != nil { + return nil, err + } + + // Push image + err = crane.Push(image, repositoryURL, opts...) + if err != nil { + return nil, err + } + + // Tag the image + err = crane.Tag(repositoryURL, tag, opts...) + if err != nil { + return nil, err + } + + return &podinfoImage{ + url: "oci://" + repositoryURL, + tag: tag, + digest: podinfoImageDigest, + }, nil +} + +func pushMultiplePodinfoImages(serverURL string, insecure bool, versions ...string) (map[string]podinfoImage, error) { + podinfoVersions := make(map[string]podinfoImage) + + var opts []crane.Option + // If the registry is insecure then instruct configure an insecure HTTP client, + // otherwise add the root CA certificate since the HTTPS server is self signed. + if insecure { + opts = append(opts, crane.Insecure) + } else { + transport := http.DefaultTransport.(*http.Transport) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(tlsCA) + transport.TLSClientConfig = &tls.Config{ + RootCAs: pool, + } + opts = append(opts, crane.WithTransport(transport)) + } + for i := 0; i < len(versions); i++ { + pi, err := createPodinfoImageFromTar(fmt.Sprintf("podinfo-%s.tar", versions[i]), versions[i], serverURL, opts...) 
+ if err != nil { + return nil, err + } + + podinfoVersions[versions[i]] = *pi + + } + + return podinfoVersions, nil +} + +func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { + metadata := map[string]string{ + oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: fmt.Sprintf("%s@sha1:b3b00fe35424a45d373bf4c7214178bc36fd7872", tag), + } + return mutate.Annotations(img, metadata).(gcrv1.Image) +} + +func TestOCIContentConfigChanged(t *testing.T) { + tests := []struct { + name string + spec sourcev1.OCIRepositorySpec + status sourcev1.OCIRepositoryStatus + want bool + }{ + { + name: "same ignore, no layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + }, + want: false, + }, + { + name: "different ignore, no layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("mmm"), + }, + want: true, + }, + { + name: "same ignore, same layer selector", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: false, + }, + { + name: "same ignore, different layer selector operation", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerCopy, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + { + name: "same ignore, different 
layer selector mediatype", + spec: sourcev1.OCIRepositorySpec{ + Ignore: ptr.To("nnn"), + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "bar", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedIgnore: ptr.To("nnn"), + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + { + name: "no ignore, same layer selector", + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: false, + }, + { + name: "no ignore, different layer selector", + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "bar", + Operation: sourcev1.OCILayerExtract, + }, + }, + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ + MediaType: "foo", + Operation: sourcev1.OCILayerExtract, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.OCIRepository{ + Spec: tt.spec, + Status: tt.status, + } + + g.Expect(ociContentConfigChanged(obj)).To(Equal(tt.want)) + }) + } +} diff --git a/controllers/source_predicate.go b/internal/controller/source_predicate.go similarity index 94% rename from controllers/source_predicate.go rename to internal/controller/source_predicate.go index 47dc73c28..968f2def9 100644 --- a/controllers/source_predicate.go +++ b/internal/controller/source_predicate.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package controller import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) type SourceRevisionChangePredicate struct { diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go new file mode 100644 index 000000000..ad0365616 --- /dev/null +++ b/internal/controller/suite_test.go @@ -0,0 +1,468 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "log" + "math/rand" + "net" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + "github.com/distribution/distribution/v3/configuration" + dockerRegistry "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + "github.com/foxcpp/go-mockdns" + "github.com/phayes/freeport" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/bcrypt" + "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/cache" + // +kubebuilder:scaffold:imports +) + +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
+ +const ( + timeout = 10 * time.Second + interval = 1 * time.Second + retentionTTL = 2 * time.Second + retentionRecords = 2 +) + +const ( + testRegistryHtpasswdFileBasename = "authtest.htpasswd" + testRegistryUsername = "myuser" + testRegistryPassword = "mypass" +) + +var ( + k8sClient client.Client + testEnv *testenv.Environment + testStorage *storage.Storage + testServer *testserver.ArtifactServer + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) + +var ( + testGetters = getter.Providers{ + getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }, + getter.Provider{ + Schemes: []string{"oci"}, + New: getter.NewOCIGetter, + }, + } +) + +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte + clientPublicKey []byte + clientPrivateKey []byte +) + +var ( + testRegistryServer *registryClientTestServer + testCache *cache.Cache +) + +type registryClientTestServer struct { + out io.Writer + registryHost string + workspaceDir string + registryClient *helmreg.Client + dnsServer *mockdns.Server +} + +type registryOptions struct { + withBasicAuth bool + withTLS bool + withClientCertAuth bool +} + +func setupRegistryServer(ctx context.Context, workspaceDir string, opts registryOptions) (*registryClientTestServer, error) { + server := ®istryClientTestServer{} + + if workspaceDir == "" { + return nil, fmt.Errorf("workspace directory cannot be an empty string") + } + + server.workspaceDir = workspaceDir + + var out bytes.Buffer + server.out = &out + + // init test client options + clientOpts := []helmreg.ClientOption{ + helmreg.ClientOptDebug(true), + helmreg.ClientOptWriter(server.out), + } + + config := &configuration.Configuration{} + port, err := freeport.GetFreePort() + if err != nil { + return nil, fmt.Errorf("failed to get free port: %s", err) + } + + // Change the registry host to a host which is not localhost and + // mock DNS to map example.com to 127.0.0.1. 
+ // This is required because Docker enforces HTTP if the registry + // is hosted on localhost/127.0.0.1. + if opts.withTLS { + server.registryHost = fmt.Sprintf("example.com:%d", port) + // Disable DNS server logging as it is extremely chatty. + dnsLog := log.Default() + dnsLog.SetOutput(io.Discard) + server.dnsServer, err = mockdns.NewServerWithLogger(map[string]mockdns.Zone{ + "example.com.": { + A: []string{"127.0.0.1"}, + }, + }, dnsLog, false) + if err != nil { + return nil, err + } + server.dnsServer.PatchNet(net.DefaultResolver) + } else { + server.registryHost = fmt.Sprintf("127.0.0.1:%d", port) + } + + config.HTTP.Addr = fmt.Sprintf(":%d", port) + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + + if opts.withBasicAuth { + // create htpasswd file (w BCrypt, which is required) + pwBytes, err := bcrypt.GenerateFromPassword([]byte(testRegistryPassword), bcrypt.DefaultCost) + if err != nil { + return nil, fmt.Errorf("failed to generate password: %s", err) + } + + htpasswdPath := filepath.Join(workspaceDir, testRegistryHtpasswdFileBasename) + if err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testRegistryUsername, string(pwBytes))), 0644); err != nil { + return nil, fmt.Errorf("failed to create htpasswd file: %s", err) + } + + // Registry config + config.Auth = configuration.Auth{ + "htpasswd": configuration.Parameters{ + "realm": "localhost", + "path": htpasswdPath, + }, + } + } + + if opts.withTLS { + config.HTTP.TLS.Certificate = "testdata/certs/server.pem" + config.HTTP.TLS.Key = "testdata/certs/server-key.pem" + // Configure CA certificates only if client cert authentication is enabled. 
+ if opts.withClientCertAuth { + config.HTTP.TLS.ClientCAs = []string{"testdata/certs/ca.pem"} + } + + // add TLS configured HTTP client option to clientOpts + httpClient, err := tlsConfiguredHTTPCLient() + if err != nil { + return nil, fmt.Errorf("failed to create TLS configured HTTP client: %s", err) + } + clientOpts = append(clientOpts, helmreg.ClientOptHTTPClient(httpClient)) + } else { + clientOpts = append(clientOpts, helmreg.ClientOptPlainHTTP()) + } + + // setup logger options + config.Log.AccessLog.Disabled = true + config.Log.Level = "error" + logrus.SetOutput(io.Discard) + + registry, err := dockerRegistry.NewRegistry(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to create docker registry: %w", err) + } + + // init test client + helmClient, err := helmreg.NewClient(clientOpts...) + if err != nil { + return nil, fmt.Errorf("failed to create registry client: %s", err) + } + server.registryClient = helmClient + + // Start Docker registry + go registry.ListenAndServe() + + return server, nil +} + +func tlsConfiguredHTTPCLient() (*http.Client, error) { + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(tlsCA) { + return nil, fmt.Errorf("failed to append CA certificate to pool") + } + cert, err := tls.LoadX509KeyPair("testdata/certs/server.pem", "testdata/certs/server-key.pem") + if err != nil { + return nil, fmt.Errorf("failed to load server certificate: %s", err) + } + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + Certificates: []tls.Certificate{ + cert, + }, + }, + }, + } + return httpClient, nil +} + +func (r *registryClientTestServer) Close() { + if r.dnsServer != nil { + mockdns.UnpatchNet(net.DefaultResolver) + r.dnsServer.Close() + } +} + +func TestMain(m *testing.M) { + initTestTLS() + + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + testEnv = testenv.New( + testenv.WithCRDPath(filepath.Join("..", "..", "config", "crd", "bases")), + 
testenv.WithMaxConcurrentReconciles(4), + ) + + var err error + // Initialize a cacheless client for tests that need the latest objects. + k8sClient, err = client.New(testEnv.Config, client.Options{Scheme: scheme.Scheme}) + if err != nil { + panic(fmt.Sprintf("failed to create k8s client: %v", err)) + } + + testServer, err = testserver.NewTempArtifactServer() + if err != nil { + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) + } + fmt.Println("Starting the test storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) + if err != nil { + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) + } + + testMetricsH = controller.NewMetrics(testEnv, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) + + testWorkspaceDir, err := os.MkdirTemp("", "registry-test-") + if err != nil { + panic(fmt.Sprintf("failed to create workspace directory: %v", err)) + } + testRegistryServer, err = setupRegistryServer(ctx, testWorkspaceDir, registryOptions{ + withBasicAuth: true, + }) + if err != nil { + panic(fmt.Sprintf("Failed to create a test registry server: %v", err)) + } + defer testRegistryServer.Close() + + if err := (&GitRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManagerAndOptions(testEnv, GitRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + } + + if err := (&BucketReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManagerAndOptions(testEnv, BucketReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + + testCache = cache.New(5, 1*time.Second) + cacheRecorder := cache.MustMakeMetrics() 
+ + if err := (&OCIRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManagerAndOptions(testEnv, OCIRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start OCIRepositoryReconciler: %v", err)) + } + + if err := (&HelmRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + Cache: testCache, + TTL: 1 * time.Second, + CacheRecorder: cacheRecorder, + }).SetupWithManagerAndOptions(testEnv, HelmRepositoryReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) + } + + if err := (&HelmChartReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + Cache: testCache, + TTL: 1 * time.Second, + CacheRecorder: cacheRecorder, + }).SetupWithManagerAndOptions(ctx, testEnv, HelmChartReconcilerOptions{ + RateLimiter: controller.GetDefaultRateLimiter(), + }); err != nil { + panic(fmt.Sprintf("Failed to start HelmChartReconciler: %v", err)) + } + + go func() { + fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } + }() + <-testEnv.Manager.Elected() + + code := m.Run() + + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } + + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) + } + + if err := os.RemoveAll(testWorkspaceDir); err != nil { + 
panic(fmt.Sprintf("Failed to remove registry workspace dir: %v", err)) + } + + os.Exit(code) +} + +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + panic(err) + } + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + panic(err) + } + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") + if err != nil { + panic(err) + } + clientPrivateKey, err = os.ReadFile("testdata/certs/client-key.pem") + if err != nil { + panic(err) + } + clientPublicKey, err = os.ReadFile("testdata/certs/client.pem") + if err != nil { + panic(err) + } +} + +func newTestStorage(s *testserver.HTTPServer) (*storage.Storage, error) { + opts := &config.Options{ + StoragePath: s.Root(), + StorageAddress: s.URL(), + StorageAdvAddress: s.URL(), + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + if err != nil { + return nil, err + } + return st, nil +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func int64p(i int64) *int64 { + return &i +} + +func logOCIRepoStatus(t *testing.T, obj *sourcev1.OCIRepository) { + sts, _ := yaml.Marshal(obj.Status) + t.Log(string(sts)) +} diff --git a/controllers/testdata/certs/Makefile b/internal/controller/testdata/certs/Makefile similarity index 76% rename from controllers/testdata/certs/Makefile rename to internal/controller/testdata/certs/Makefile index dca2408c3..22b40466b 100644 --- a/controllers/testdata/certs/Makefile +++ b/internal/controller/testdata/certs/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-all: server-key.pem +all: server-key.pem client-key.pem ca-key.pem: ca-csr.json cfssl gencert -initca ca-csr.json | cfssljson -bare ca – @@ -28,3 +28,13 @@ server-key.pem: server-csr.json ca-config.json ca-key.pem server-csr.json | cfssljson -bare server sever.pem: server-key.pem server.csr: server-key.pem + +client-key.pem: client-csr.json ca-config.json ca-key.pem + cfssl gencert \ + -ca=ca.pem \ + -ca-key=ca-key.pem \ + -config=ca-config.json \ + -profile=web-servers \ + client-csr.json | cfssljson -bare client +client.pem: client-key.pem +client.csr: client-key.pem diff --git a/controllers/testdata/certs/ca-config.json b/internal/controller/testdata/certs/ca-config.json similarity index 100% rename from controllers/testdata/certs/ca-config.json rename to internal/controller/testdata/certs/ca-config.json diff --git a/controllers/testdata/certs/ca-csr.json b/internal/controller/testdata/certs/ca-csr.json similarity index 100% rename from controllers/testdata/certs/ca-csr.json rename to internal/controller/testdata/certs/ca-csr.json diff --git a/internal/controller/testdata/certs/ca-key.pem b/internal/controller/testdata/certs/ca-key.pem new file mode 100644 index 000000000..5f78af275 --- /dev/null +++ b/internal/controller/testdata/certs/ca-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEICJFvVFVBSL0EteniBRfI9M1tm9Vmh9CKv7dhvZSqtV6oAoGCCqGSM49 +AwEHoUQDQgAE+EGQ9wZw/XIbyCwu7wvbzoGhpE2KtZwSUXboPEAgacfaqfgdT92D +If9qYie8umbgUymQnnqN8fRnT/wqqdBLDg== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/ca.csr b/internal/controller/testdata/certs/ca.csr new file mode 100644 index 000000000..ed5490ce2 --- /dev/null +++ b/internal/controller/testdata/certs/ca.csr @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBHzCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6gSzBJBgkqhkiG9w0BCQ4x 
+PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt +cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiEA1PxOWSIrmLb5IeejHvfx +AkjpamR/GTLhSzXlGv1hCmsCIDSeZL2OF5R5k2v4giXiB6GUfmawykGkO2fIG1kq +5l5V +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/ca.pem b/internal/controller/testdata/certs/ca.pem new file mode 100644 index 000000000..72644519d --- /dev/null +++ b/internal/controller/testdata/certs/ca.pem @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE----- +MIIBiDCCAS2gAwIBAgIUCRPU/Fa1nIWlk7TUejHGI+WKJFAwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzAw +NDIxMDcwNTAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6jUzBRMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS+cS2gBCfSCltLUMNY0kG2 +mj9zEDAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0kAMEYCIQC33kO/m+ab +i/2dlkg7hab4jCkFkxV3fWiOP0lbrLIMYQIhAPOcHeXmGE32apXKoZ6IfGJdMtz1 +3bkHYeqNs2qtpQ/5 +-----END CERTIFICATE----- diff --git a/controllers/testdata/certs/server-csr.json b/internal/controller/testdata/certs/client-csr.json similarity index 100% rename from controllers/testdata/certs/server-csr.json rename to internal/controller/testdata/certs/client-csr.json diff --git a/internal/controller/testdata/certs/client-key.pem b/internal/controller/testdata/certs/client-key.pem new file mode 100644 index 000000000..f55b40b4d --- /dev/null +++ b/internal/controller/testdata/certs/client-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIFVLYwGEhqLW/WYnsA9om6cSxcgVsKnwIWXc34DF7LpwoAoGCCqGSM49 +AwEHoUQDQgAE5H76We32W5cQq8DRJT+pteyh53GUBiI5IbM+qVWgsCIFJEaSJKgs +mv1H7c3NhP292Pgr6vdWJACLQHzmpsVpmg== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/client.csr b/internal/controller/testdata/certs/client.csr new file mode 100644 index 000000000..3699ea27b --- /dev/null +++ 
b/internal/controller/testdata/certs/client.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqgSzBJBgkqhkiG9w0BCQ4xPDA6 +MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiB0px2gw2ICFz26zAajtJyoNHl+ +inOXY5ohtzP4ag+NXQIgAbjIsOUuQ7JT31DdI6yCVfO014hHawtEsdV4rxTrQMA= +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/client.pem b/internal/controller/testdata/certs/client.pem new file mode 100644 index 000000000..9db876e59 --- /dev/null +++ b/internal/controller/testdata/certs/client.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB7DCCAZKgAwIBAgIUPH5zyEsXoFMCMkZaM2s6YtnoQcgwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqjgbowgbcwDgYDVR0PAQH/BAQD +AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MB0GA1UdDgQWBBTqud4vpysQdb1/5K3RoDXvBdQGgzAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIhAM0u +Eo6u3BDtw5bezhLa/THDy4QT63ktpAff9i/QJOErAiAifOvW7n5ZTLjjSnJ+dCtr +Avtupcg1WLyryhliqtNKhg== +-----END CERTIFICATE----- diff --git a/internal/controller/testdata/certs/server-csr.json b/internal/controller/testdata/certs/server-csr.json new file mode 100644 index 000000000..0baf11601 --- /dev/null +++ b/internal/controller/testdata/certs/server-csr.json @@ -0,0 +1,9 @@ +{ + "CN": "example.com", + "hosts": [ + "127.0.0.1", + "localhost", + "example.com", + "www.example.com" + ] +} diff --git a/internal/controller/testdata/certs/server-key.pem b/internal/controller/testdata/certs/server-key.pem new 
file mode 100644 index 000000000..64d7da136 --- /dev/null +++ b/internal/controller/testdata/certs/server-key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIH19RQir/x9wHNAvHITu7/3Y4ckQ3GsNyEGYF3/nalheoAoGCCqGSM49 +AwEHoUQDQgAEvqlooNIpRmCjv9yBzjqoyXZvcU8zo9npYm3HPX7TReYetrkkJh/P +6a5NDJhnWemcj9iZdm2kGTE7MCgGi4mRog== +-----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/server.csr b/internal/controller/testdata/certs/server.csr new file mode 100644 index 000000000..b0fce1781 --- /dev/null +++ b/internal/controller/testdata/certs/server.csr @@ -0,0 +1,8 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKgSzBJBgkqhkiG9w0BCQ4xPDA6 +MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiAJbvDLjrCkTRvTjrv2wXLN9Hgu +p6SrTQJUWlIj3S8DggIgJraxPvnwfeKE5dM7ZgJXADHy838h04dQ+Za7hS899V8= +-----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/server.pem b/internal/controller/testdata/certs/server.pem new file mode 100644 index 000000000..f3345e3b2 --- /dev/null +++ b/internal/controller/testdata/certs/server.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB6zCCAZKgAwIBAgIUSGuttQSdoyWQzeZ6GkiKORYYUvQwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKjgbowgbcwDgYDVR0PAQH/BAQD +AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MB0GA1UdDgQWBBSNrNAk9jWUcFjxjAKzuDwsBrG1NDAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDRwAwRAIgIcrb +xGgcRsmP/R6Qo+Xe/w1UvNDaWJfsWO+hq1DtOQgCIEyGi3ClowsGnNpo734ArWbG 
+taem7qVKZJmCWRM6DFuT +-----END CERTIFICATE----- diff --git a/controllers/testdata/charts/helmchart-0.1.0.tgz b/internal/controller/testdata/charts/helmchart-0.1.0.tgz similarity index 100% rename from controllers/testdata/charts/helmchart-0.1.0.tgz rename to internal/controller/testdata/charts/helmchart-0.1.0.tgz diff --git a/controllers/testdata/charts/helmchart/.helmignore b/internal/controller/testdata/charts/helmchart/.helmignore similarity index 100% rename from controllers/testdata/charts/helmchart/.helmignore rename to internal/controller/testdata/charts/helmchart/.helmignore diff --git a/controllers/testdata/charts/helmchart/Chart.yaml b/internal/controller/testdata/charts/helmchart/Chart.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/Chart.yaml rename to internal/controller/testdata/charts/helmchart/Chart.yaml diff --git a/internal/controller/testdata/charts/helmchart/duplicate.yaml b/internal/controller/testdata/charts/helmchart/duplicate.yaml new file mode 100644 index 000000000..5f7ae58bd --- /dev/null +++ b/internal/controller/testdata/charts/helmchart/duplicate.yaml @@ -0,0 +1,70 @@ +# Default values for helmchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Values for tests +testDefault: true +testOverride: false diff --git a/controllers/testdata/charts/helmchart/override.yaml b/internal/controller/testdata/charts/helmchart/override.yaml similarity index 96% rename from controllers/testdata/charts/helmchart/override.yaml rename to internal/controller/testdata/charts/helmchart/override.yaml index e08cec5bf..d01d3acfa 100644 --- a/controllers/testdata/charts/helmchart/override.yaml +++ b/internal/controller/testdata/charts/helmchart/override.yaml @@ -64,3 +64,7 @@ nodeSelector: {} tolerations: [] affinity: {} + +# Values for tests +# testDefault: true +testOverride: true diff --git a/controllers/testdata/charts/helmchart/templates/NOTES.txt b/internal/controller/testdata/charts/helmchart/templates/NOTES.txt similarity index 100% rename from controllers/testdata/charts/helmchart/templates/NOTES.txt rename to 
internal/controller/testdata/charts/helmchart/templates/NOTES.txt diff --git a/controllers/testdata/charts/helmchart/templates/_helpers.tpl b/internal/controller/testdata/charts/helmchart/templates/_helpers.tpl similarity index 100% rename from controllers/testdata/charts/helmchart/templates/_helpers.tpl rename to internal/controller/testdata/charts/helmchart/templates/_helpers.tpl diff --git a/controllers/testdata/charts/helmchart/templates/deployment.yaml b/internal/controller/testdata/charts/helmchart/templates/deployment.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/deployment.yaml rename to internal/controller/testdata/charts/helmchart/templates/deployment.yaml diff --git a/controllers/testdata/charts/helmchart/templates/ingress.yaml b/internal/controller/testdata/charts/helmchart/templates/ingress.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/ingress.yaml rename to internal/controller/testdata/charts/helmchart/templates/ingress.yaml diff --git a/controllers/testdata/charts/helmchart/templates/service.yaml b/internal/controller/testdata/charts/helmchart/templates/service.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/service.yaml rename to internal/controller/testdata/charts/helmchart/templates/service.yaml diff --git a/controllers/testdata/charts/helmchart/templates/serviceaccount.yaml b/internal/controller/testdata/charts/helmchart/templates/serviceaccount.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/serviceaccount.yaml rename to internal/controller/testdata/charts/helmchart/templates/serviceaccount.yaml diff --git a/controllers/testdata/charts/helmchart/templates/tests/test-connection.yaml b/internal/controller/testdata/charts/helmchart/templates/tests/test-connection.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/templates/tests/test-connection.yaml rename to 
internal/controller/testdata/charts/helmchart/templates/tests/test-connection.yaml diff --git a/internal/controller/testdata/charts/helmchart/values.yaml b/internal/controller/testdata/charts/helmchart/values.yaml new file mode 100644 index 000000000..5f7ae58bd --- /dev/null +++ b/internal/controller/testdata/charts/helmchart/values.yaml @@ -0,0 +1,70 @@ +# Default values for helmchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Values for tests +testDefault: true +testOverride: false diff --git a/controllers/testdata/charts/helmchartwithdeps/.helmignore b/internal/controller/testdata/charts/helmchartwithdeps/.helmignore similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/.helmignore rename to internal/controller/testdata/charts/helmchartwithdeps/.helmignore diff --git a/controllers/testdata/charts/helmchartwithdeps/Chart.yaml b/internal/controller/testdata/charts/helmchartwithdeps/Chart.yaml similarity index 92% rename from controllers/testdata/charts/helmchartwithdeps/Chart.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/Chart.yaml index 99dac50b9..0251612c0 100644 --- a/controllers/testdata/charts/helmchartwithdeps/Chart.yaml +++ b/internal/controller/testdata/charts/helmchartwithdeps/Chart.yaml @@ -31,3 +31,6 @@ dependencies: - name: grafana version: ">=5.7.0" repository: "https://grafana.github.io/helm-charts" + - name: podinfo + version: ">=6.1.*" + repository: "oci://ghcr.io/stefanprodan/charts" diff --git a/internal/controller/testdata/charts/helmchartwithdeps/override.yaml b/internal/controller/testdata/charts/helmchartwithdeps/override.yaml new file mode 100644 index 000000000..d01d3acfa --- /dev/null +++ b/internal/controller/testdata/charts/helmchartwithdeps/override.yaml @@ -0,0 +1,70 @@ +# Override values for helmchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 3 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Values for tests +# testDefault: true +testOverride: true diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/NOTES.txt b/internal/controller/testdata/charts/helmchartwithdeps/templates/NOTES.txt similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/NOTES.txt rename to internal/controller/testdata/charts/helmchartwithdeps/templates/NOTES.txt diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/_helpers.tpl b/internal/controller/testdata/charts/helmchartwithdeps/templates/_helpers.tpl similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/_helpers.tpl rename to internal/controller/testdata/charts/helmchartwithdeps/templates/_helpers.tpl diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/deployment.yaml 
b/internal/controller/testdata/charts/helmchartwithdeps/templates/deployment.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/deployment.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/deployment.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/ingress.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/ingress.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/ingress.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/ingress.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/service.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/service.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/service.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/service.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/serviceaccount.yaml diff --git a/controllers/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml b/internal/controller/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml rename to internal/controller/testdata/charts/helmchartwithdeps/templates/tests/test-connection.yaml diff --git a/internal/controller/testdata/charts/helmchartwithdeps/values.yaml b/internal/controller/testdata/charts/helmchartwithdeps/values.yaml new file mode 100644 index 000000000..cfee81515 --- /dev/null +++ 
b/internal/controller/testdata/charts/helmchartwithdeps/values.yaml @@ -0,0 +1,70 @@ +# Default values for helmchartwithdeps. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Values for tests +testDefault: true +testOverride: false diff --git a/internal/controller/testdata/git/repository/.sourceignore b/internal/controller/testdata/git/repository/.sourceignore new file mode 100644 index 000000000..989478d13 --- /dev/null +++ b/internal/controller/testdata/git/repository/.sourceignore @@ -0,0 +1 @@ +**.txt diff --git a/internal/fs/testdata/test.file b/internal/controller/testdata/git/repository/foo.txt similarity index 100% rename from internal/fs/testdata/test.file rename to internal/controller/testdata/git/repository/foo.txt diff --git a/internal/controller/testdata/git/repository/manifest.yaml b/internal/controller/testdata/git/repository/manifest.yaml new file mode 100644 index 000000000..220e1b33e --- /dev/null +++ b/internal/controller/testdata/git/repository/manifest.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dummy diff --git a/internal/controller/testdata/git/repowithsubdirs/.sourceignore b/internal/controller/testdata/git/repowithsubdirs/.sourceignore new file mode 100644 index 000000000..1753c9189 --- /dev/null +++ b/internal/controller/testdata/git/repowithsubdirs/.sourceignore @@ -0,0 +1,6 @@ +# Exclude all +/* + +# Include manifest directories +!/apps/ +!/clusters/ diff --git a/internal/controller/testdata/git/repowithsubdirs/apps/manifest.yaml b/internal/controller/testdata/git/repowithsubdirs/apps/manifest.yaml new file mode 100644 index 000000000..fd6e0f901 --- /dev/null +++ b/internal/controller/testdata/git/repowithsubdirs/apps/manifest.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: apps diff --git a/internal/controller/testdata/git/repowithsubdirs/clusters/manifest.yaml b/internal/controller/testdata/git/repowithsubdirs/clusters/manifest.yaml new file mode 100644 index 000000000..84a8f643f --- /dev/null +++ 
b/internal/controller/testdata/git/repowithsubdirs/clusters/manifest.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: clusters diff --git a/internal/controller/testdata/git/repowithsubdirs/foo.txt b/internal/controller/testdata/git/repowithsubdirs/foo.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/controller/testdata/oci/repository/foo.txt b/internal/controller/testdata/oci/repository/foo.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.4.tar b/internal/controller/testdata/podinfo/podinfo-6.1.4.tar new file mode 100644 index 000000000..dbc58051d Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.4.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar new file mode 100644 index 000000000..09616c2df Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.6.tar b/internal/controller/testdata/podinfo/podinfo-6.1.6.tar new 
file mode 100644 index 000000000..09616c2df Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.6.tar differ diff --git a/internal/error/error.go b/internal/error/error.go new file mode 100644 index 000000000..cb3a8cd78 --- /dev/null +++ b/internal/error/error.go @@ -0,0 +1,167 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "time" + + corev1 "k8s.io/api/core/v1" +) + +// EventTypeNone indicates no error event. It can be used to disable error +// events. +const EventTypeNone = "None" + +// Config is the error configuration. It is embedded in the errors and can be +// used to configure how the error should be handled. These configurations +// mostly define actions to be taken on the errors. Not all the configurations +// may apply to every error. +type Config struct { + // Event is the event type of an error. It is used to configure what type of + // event an error should result in. + // Valid values: + // - EventTypeNone + // - corev1.EventTypeNormal + // - corev1.EventTypeWarning + Event string + // Log is used to configure if an error should be logged. The log level is + // derived from the Event type. + // None event - info log + // Normal event - info log + // Warning event - error log + Log bool + // Notification is used to emit an error as a notification alert to a + // a notification service. + Notification bool + // Ignore is used to suppress the error for no-op reconciliations. 
It may + // be applicable to non-contextual errors only. + Ignore bool +} + +// Stalling is the reconciliation stalled state error. It contains an error +// and a reason for the stalled condition. It is a contextual error, used to +// express the scenario which contributed to the reconciliation result. +type Stalling struct { + // Reason is the stalled condition reason string. + Reason string + // Err is the error that caused stalling. This can be used as the message in + // stalled condition. + Err error + // Config is the error handler configuration. + Config +} + +// Error implements error interface. +func (se *Stalling) Error() string { + return se.Err.Error() +} + +// Unwrap returns the underlying error. +func (se *Stalling) Unwrap() error { + return se.Err +} + +// NewStalling constructs a new Stalling error with default configuration. +func NewStalling(err error, reason string) *Stalling { + // Stalling errors are not returned to the runtime. Log it explicitly. + // Since this failure requires user interaction, send warning notification. + return &Stalling{ + Reason: reason, + Err: err, + Config: Config{ + Event: corev1.EventTypeWarning, + Log: true, + Notification: true, + }, + } +} + +// Waiting is the reconciliation wait state error. It contains an error, wait +// duration and a reason for the wait. It is a contextual error, used to express +// the scenario which contributed to the reconciliation result. +// It is for scenarios where a reconciliation needs to wait for something else +// to take place first. +type Waiting struct { + // RequeueAfter is the wait duration after which to requeue. + RequeueAfter time.Duration + // Reason is the reason for the wait. + Reason string + // Err is the error that caused the wait. + Err error + // Config is the error handler configuration. + Config +} + +// Error implements error interface. +func (we *Waiting) Error() string { + return we.Err.Error() +} + +// Unwrap returns the underlying error. 
+func (we *Waiting) Unwrap() error { + return we.Err +} + +// NewWaiting constructs a new Waiting error with default configuration. +func NewWaiting(err error, reason string) *Waiting { + // Waiting errors are not returned to the runtime. Log it explicitly. + // Since this failure results in reconciliation delay, send warning + // notification. + return &Waiting{ + Reason: reason, + Err: err, + Config: Config{ + Event: corev1.EventTypeNormal, + Log: true, + }, + } +} + +// Generic error is a generic reconcile error. It can be used in scenarios that +// don't have any special contextual meaning. +type Generic struct { + // Reason is the reason for the generic error. + Reason string + // Error is the error that caused the generic error. + Err error + // Config is the error handler configuration. + Config +} + +// Error implements error interface. +func (g *Generic) Error() string { + return g.Err.Error() +} + +// Unwrap returns the underlying error. +func (g *Generic) Unwrap() error { + return g.Err +} + +// NewGeneric constructs a new Generic error with default configuration. +func NewGeneric(err error, reason string) *Generic { + // Since it's a generic error, it'll be returned to the runtime and logged + // automatically, do not log it. Send failure notification. + return &Generic{ + Reason: reason, + Err: err, + Config: Config{ + Event: corev1.EventTypeWarning, + Notification: true, + }, + } +} diff --git a/internal/error/sanitized.go b/internal/error/sanitized.go new file mode 100644 index 000000000..04f6ccf92 --- /dev/null +++ b/internal/error/sanitized.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "fmt" + "net/url" + "regexp" +) + +type SanitizedError struct { + err string +} + +func (e SanitizedError) Error() string { + return e.err +} + +// SanitizeError extracts all URLs from the error message +// and replaces them with the URL without the query string. +func SanitizeError(err error) SanitizedError { + errorMessage := err.Error() + for _, u := range extractURLs(errorMessage) { + urlWithoutQueryString, err := removeQueryString(u) + if err == nil { + re, err := regexp.Compile(fmt.Sprintf("%s*", regexp.QuoteMeta(u))) + if err == nil { + errorMessage = re.ReplaceAllString(errorMessage, urlWithoutQueryString) + } + } + } + + return SanitizedError{errorMessage} +} + +// removeQueryString takes a URL string as input and returns the URL without the query string. +func removeQueryString(urlStr string) (string, error) { + // Parse the URL. + u, err := url.Parse(urlStr) + if err != nil { + return "", err + } + + // Rebuild the URL without the query string. + u.RawQuery = "" + return u.String(), nil +} + +// extractURLs takes a log message as input and returns the URLs found. +func extractURLs(logMessage string) []string { + // Define a regular expression to match a URL. + // This is a simple pattern and might need to be adjusted depending on the log message format. + urlRegex := regexp.MustCompile(`https?://[^\s]+`) + + // Find the first match in the log message. 
+ matches := urlRegex.FindAllString(logMessage, -1) + if len(matches) == 0 { + return []string{} + } + + return matches +} diff --git a/internal/error/sanitized_test.go b/internal/error/sanitized_test.go new file mode 100644 index 000000000..e9c6a858b --- /dev/null +++ b/internal/error/sanitized_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" +) + +func Test_extractURLs(t *testing.T) { + + tests := []struct { + name string + logMessage string + wantUrls []string + }{ + { + name: "Log Contains single URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\":"}, + }, + { + name: "Log Contains multiple URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no : dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{ + "https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es", + 
"https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no", + }, + }, + { + name: "Log Contains No URL", + logMessage: "Log message without URL", + wantUrls: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urls := extractURLs(tt.logMessage) + + g.Expect(len(urls)).To(Equal(len(tt.wantUrls))) + for i := range tt.wantUrls { + g.Expect(urls[i]).To(Equal(tt.wantUrls[i])) + } + }) + } +} + +func Test_removeQueryString(t *testing.T) { + + tests := []struct { + name string + urlStr string + wantUrl string + }{ + { + name: "URL with query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL without query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml", + wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL with query string and port", + urlStr: "https://blobstorage.blob.core.windows.net:443/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net:443/container/index.yaml", + }, + { + name: "Invalid URL", + urlStr: "NoUrl", + wantUrl: "NoUrl", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urlWithoutQueryString, err := removeQueryString(tt.urlStr) + + g.Expect(err).To(BeNil()) + g.Expect(urlWithoutQueryString).To(Equal(tt.wantUrl)) + }) + } +} + +func Test_SanitizeError(t *testing.T) { + + tests := []struct { + name string + errMessage string + wantErrMessage string + }{ + { + name: "Log message with URL with query string", + errMessage: "Get 
\"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + { + name: "Log message without URL", + errMessage: "Log message contains no URL", + wantErrMessage: "Log message contains no URL", + }, + + { + name: "Log message with multiple Urls", + errMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml https://blobstorage1.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := SanitizeError(errors.New(tt.errMessage)) + g.Expect(err.Error()).To(Equal(tt.wantErrMessage)) + }) + } +} diff --git a/internal/features/features.go b/internal/features/features.go new file mode 100644 index 000000000..edb9beb17 --- /dev/null +++ b/internal/features/features.go @@ -0,0 +1,66 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package features sets the feature gates that +// source-controller supports, and their default +// states. +package features + +import ( + "github.com/fluxcd/pkg/auth" + feathelper "github.com/fluxcd/pkg/runtime/features" +) + +const ( + // CacheSecretsAndConfigMaps controls whether secrets and configmaps should be cached. + // + // When enabled, it will cache both object types, resulting in increased memory usage + // and cluster-wide RBAC permissions (list and watch). + CacheSecretsAndConfigMaps = "CacheSecretsAndConfigMaps" +) + +var features = map[string]bool{ + // CacheSecretsAndConfigMaps + // opt-in from v0.34 + CacheSecretsAndConfigMaps: false, +} + +func init() { + auth.SetFeatureGates(features) +} + +// FeatureGates contains a list of all supported feature gates and +// their default values. +func FeatureGates() map[string]bool { + return features +} + +// Enabled verifies whether the feature is enabled or not. +// +// This is only a wrapper around the Enabled func in +// pkg/runtime/features, so callers won't need to import +// both packages for checking whether a feature is enabled. +func Enabled(feature string) (bool, error) { + return feathelper.Enabled(feature) +} + +// Disable disables the specified feature. If the feature is not +// present, it's a no-op. +func Disable(feature string) { + if _, ok := features[feature]; ok { + features[feature] = false + } +} diff --git a/internal/fs/LICENSE b/internal/fs/LICENSE deleted file mode 100644 index a2dd15faf..000000000 --- a/internal/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/fs/fs.go b/internal/fs/fs.go deleted file mode 100644 index c8ece049d..000000000 --- a/internal/fs/fs.go +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "syscall" -) - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-device link error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. 
-func RenameWithFallback(src, dst string) error { - _, err := os.Stat(src) - if err != nil { - return fmt.Errorf("cannot stat %s: %w", src, err) - } - - err = os.Rename(src, dst) - if err == nil { - return nil - } - - return renameFallback(err, src, dst) -} - -// renameByCopy attempts to rename a file or directory by copying it to the -// destination and then removing the src thus emulating the rename behavior. -func renameByCopy(src, dst string) error { - var cerr error - if dir, _ := IsDir(src); dir { - cerr = CopyDir(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying directory failed: %w", cerr) - } - } else { - cerr = copyFile(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying file failed: %w", cerr) - } - } - - if cerr != nil { - return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr) - } - - if err := os.RemoveAll(src); err != nil { - return fmt.Errorf("cannot delete %s: %w", src, err) - } - - return nil -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDstExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -func CopyDir(src, dst string) error { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - // We use os.Lstat() here to ensure we don't fall in a loop where a symlink - // actually links to a one of its parent directories. 
- fi, err := os.Lstat(src) - if err != nil { - return err - } - if !fi.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - return errDstExist - } - - if err = os.MkdirAll(dst, fi.Mode()); err != nil { - return fmt.Errorf("cannot mkdir %s: %w", dst, err) - } - - entries, err := ioutil.ReadDir(src) - if err != nil { - return fmt.Errorf("cannot read directory %s: %w", dst, err) - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return fmt.Errorf("copying directory failed: %w", err) - } - } else { - // This will include symlinks, which is what we want when - // copying things. - if err = copyFile(srcPath, dstPath); err != nil { - return fmt.Errorf("copying file failed: %w", err) - } - } - } - - return nil -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. The file mode will be copied from the source. -func copyFile(src, dst string) (err error) { - if sym, err := IsSymlink(src); err != nil { - return fmt.Errorf("symlink check failed: %w", err) - } else if sym { - if err := cloneSymlink(src, dst); err != nil { - if runtime.GOOS == "windows" { - // If cloning the symlink fails on Windows because the user - // does not have the required privileges, ignore the error and - // fall back to copying the file contents. 
- // - // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { - return err - } - } else { - return err - } - } else { - return nil - } - } - - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - if _, err = io.Copy(out, in); err != nil { - out.Close() - return - } - - // Check for write errors on Close - if err = out.Close(); err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - - // Temporary fix for Go < 1.9 - // - // See: https://github.com/golang/dep/issues/774 - // and https://github.com/golang/go/issues/20829 - if runtime.GOOS == "windows" { - dst = fixLongPath(dst) - } - err = os.Chmod(dst, si.Mode()) - - return -} - -// cloneSymlink will create a new symlink that points to the resolved path of sl. -// If sl is a relative symlink, dst will also be a relative symlink. -func cloneSymlink(sl, dst string) error { - resolved, err := os.Readlink(sl) - if err != nil { - return err - } - - return os.Symlink(resolved, dst) -} - -// IsDir determines is the path given is a directory or not. -func IsDir(name string) (bool, error) { - fi, err := os.Stat(name) - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", name) - } - return true, nil -} - -// IsSymlink determines if the given path is a symbolic link. -func IsSymlink(path string) (bool, error) { - l, err := os.Lstat(path) - if err != nil { - return false, err - } - - return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. 
If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. (This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. 
- return path - } - if !isAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -func isAbs(path string) (b bool) { - v := volumeName(path) - if v == "" { - return false - } - path = path[len(v):] - if path == "" { - return false - } - return os.IsPathSeparator(path[0]) -} - -func volumeName(path string) (v string) { - if len(path) < 2 { - return "" - } - // with drive letter - c := path[0] - if path[1] == ':' && - ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z') { - return path[:2] - } - // is it UNC - if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && - !os.IsPathSeparator(path[2]) && path[2] != '.' { - // first, leading `\\` and next shouldn't be `\`. its server name. - for n := 3; n < l-1; n++ { - // second, next '\' shouldn't be repeated. - if os.IsPathSeparator(path[n]) { - n++ - // third, following something characters. its share name. - if !os.IsPathSeparator(path[n]) { - if path[n] == '.' 
{ - break - } - for ; n < l; n++ { - if os.IsPathSeparator(path[n]) { - break - } - } - return path[:n] - } - break - } - } - } - return "" -} diff --git a/internal/fs/fs_test.go b/internal/fs/fs_test.go deleted file mode 100644 index eba87eba0..000000000 --- a/internal/fs/fs_test.go +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync" - "testing" -) - -var ( - mu sync.Mutex -) - -func TestRenameWithFallback(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - if err = RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil { - t.Fatal("expected an error for non existing file, but got nil") - } - - srcpath := filepath.Join(dir, "src") - - if srcf, err := os.Create(srcpath); err != nil { - t.Fatal(err) - } else { - srcf.Close() - } - - if err = RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil { - t.Fatal(err) - } - - srcpath = filepath.Join(dir, "a") - if err = os.MkdirAll(srcpath, 0777); err != nil { - t.Fatal(err) - } - - dstpath := filepath.Join(dir, "b") - if err = os.MkdirAll(dstpath, 0777); err != nil { - t.Fatal(err) - } - - if err = RenameWithFallback(srcpath, dstpath); err == nil { - t.Fatal("expected an error if dst is an existing directory, but got nil") - } -} - -func TestCopyDir(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir := filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - files := []struct { - path string - contents string - fi os.FileInfo - }{ - {path: "myfile", contents: "hello world"}, - {path: filepath.Join("subdir", "file"), contents: "subdir 
file"}, - } - - // Create structure indicated in 'files' - for i, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - if err = os.MkdirAll(dn, 0755); err != nil { - t.Fatal(err) - } - - fh, err := os.Create(fn) - if err != nil { - t.Fatal(err) - } - - if _, err = fh.Write([]byte(file.contents)); err != nil { - t.Fatal(err) - } - fh.Close() - - files[i].fi, err = os.Stat(fn) - if err != nil { - t.Fatal(err) - } - } - - destdir := filepath.Join(dir, "dest") - if err := CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - // Compare copy against structure indicated in 'files' - for _, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - dirOK, err := IsDir(dn) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", dn) - } - - got, err := ioutil.ReadFile(fn) - if err != nil { - t.Fatal(err) - } - - if file.contents != string(got) { - t.Fatalf("expected: %s, got: %s", file.contents, string(got)) - } - - gotinfo, err := os.Stat(fn) - if err != nil { - t.Fatal(err) - } - - if file.fi.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", - file.path, file.fi.Mode(), fn, gotinfo.Mode()) - } - } -} - -func TestCopyDirFail_SrcInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - cleanup := setupInaccessibleDir(t, func(dir string) error { - srcdir = filepath.Join(dir, "src") - return os.MkdirAll(srcdir, 0755) - }) - defer cleanup() - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - dstdir = filepath.Join(dir, "dst") - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_DstInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - cleanup := setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dst") - return nil - }) - defer cleanup() - - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_SrcIsNotDir(t *testing.T) { - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if _, err = os.Create(srcdir); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errSrcNotDir { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) - } - -} - -func TestCopyDirFail_DstExists(t *testing.T) { - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - 
if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - if err = os.MkdirAll(dstdir, 0755); err != nil { - t.Fatal(err) - } - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errDstExist { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) - } -} - -func TestCopyDirFailOpen(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. os.Chmod(..., 0222) below is not - // enough for the file to be readonly, and os.Chmod(..., - // 0000) returns an invalid argument error. Skipping - // this this until a compatible implementation is - // provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcdir = filepath.Join(dir, "src") - if err = os.MkdirAll(srcdir, 0755); err != nil { - t.Fatal(err) - } - - srcfn := filepath.Join(srcdir, "file") - srcf, err := os.Create(srcfn) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - // setup source file so that it cannot be read - if err = os.Chmod(srcfn, 0222); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyFile(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - want := "hello world" - if _, err := srcf.Write([]byte(want)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err 
:= copyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - got, err := ioutil.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if want != string(got) { - t.Fatalf("expected: %s, got: %s", want, string(got)) - } - - wantinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - gotinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if wantinfo.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) - } -} - -func TestCopyFileSymlink(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer cleanUpDir(dir) - - testcases := map[string]string{ - filepath.Join("./testdata/symlinks/file-symlink"): filepath.Join(dir, "dst-file"), - filepath.Join("./testdata/symlinks/windows-file-symlink"): filepath.Join(dir, "windows-dst-file"), - filepath.Join("./testdata/symlinks/invalid-symlink"): filepath.Join(dir, "invalid-symlink"), - } - - for symlink, dst := range testcases { - t.Run(symlink, func(t *testing.T) { - var err error - if err = copyFile(symlink, dst); err != nil { - t.Fatalf("failed to copy symlink: %s", err) - } - - var want, got string - - if runtime.GOOS == "windows" { - // Creating symlinks on Windows require an additional permission - // regular users aren't granted usually. So we copy the file - // content as a fall back instead of creating a real symlink. - srcb, err := ioutil.ReadFile(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - dstb, err := ioutil.ReadFile(dst) - if err != nil { - t.Fatalf("%+v", err) - } - - want = string(srcb) - got = string(dstb) - } else { - want, err = os.Readlink(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - - got, err = os.Readlink(dst) - if err != nil { - t.Fatalf("could not resolve symlink: %s", err) - } - } - - if want != got { - t.Fatalf("resolved path is incorrect. 
expected %s, got %s", want, got) - } - }) - } -} - -func TestCopyFileLongFilePath(t *testing.T) { - if runtime.GOOS != "windows" { - // We want to ensure the temporary fix actually fixes the issue with - // os.Chmod and long file paths. This is only applicable on Windows. - t.Skip("skipping on non-windows") - } - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer cleanUpDir(dir) - - // Create a directory with a long-enough path name to cause the bug in #774. - dirName := "" - for len(dir+string(os.PathSeparator)+dirName) <= 300 { - dirName += "directory" - } - - fullPath := filepath.Join(dir, dirName, string(os.PathSeparator)) - if err := os.MkdirAll(fullPath, 0755); err != nil && !os.IsExist(err) { - t.Fatalf("%+v", fmt.Errorf("unable to create temp directory: %s", fullPath)) - } - - err = ioutil.WriteFile(fullPath+"src", []byte(nil), 0644) - if err != nil { - t.Fatalf("%+v", err) - } - - err = copyFile(fullPath+"src", fullPath+"dst") - if err != nil { - t.Fatalf("unexpected error while copying file: %v", err) - } -} - -// C:\Users\appveyor\AppData\Local\Temp\1\gotest639065787\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890 - -func TestCopyFileFail(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - var dstdir string - - cleanup := setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dir") - return os.Mkdir(dstdir, 0777) - }) - defer cleanup() - - fn := filepath.Join(dstdir, "file") - if err := copyFile(srcf.Name(), fn); err == nil { - t.Fatalf("expected error for %s, got none", fn) - } -} - -// setupInaccessibleDir creates a temporary location with a single -// directory in it, in such a way that that directory is not accessible -// after this function returns. -// -// op is called with the directory as argument, so that it can create -// files or other test artifacts. -// -// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal -// will be invoked. -// -// This function returns a cleanup function that removes all the temporary -// files this function creates. It is the caller's responsibility to call -// this function before the test is done running, whether there's an error or not. 
-func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - return nil // keep compiler happy - } - - subdir := filepath.Join(dir, "dir") - - cleanup := func() { - if err := os.Chmod(subdir, 0777); err != nil { - t.Error(err) - } - if err := os.RemoveAll(dir); err != nil { - t.Error(err) - } - } - - if err := os.Mkdir(subdir, 0777); err != nil { - cleanup() - t.Fatal(err) - return nil - } - - if err := op(subdir); err != nil { - cleanup() - t.Fatal(err) - return nil - } - - if err := os.Chmod(subdir, 0666); err != nil { - cleanup() - t.Fatal(err) - return nil - } - - return cleanup -} - -func TestIsDir(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var dn string - - cleanup := setupInaccessibleDir(t, func(dir string) error { - dn = filepath.Join(dir, "dir") - return os.Mkdir(dn, 0777) - }) - defer cleanup() - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {true, false}, - filepath.Join(wd, "testdata"): {true, false}, - filepath.Join(wd, "main.go"): {false, true}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true}, - dn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the directory exists should be inaccessible. 
- delete(tests, dn) - } - - for f, want := range tests { - got, err := IsDir(f) - if err != nil && !want.err { - t.Fatalf("expected no error, got %v", err) - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - } -} - -func TestIsSymlink(t *testing.T) { - dir, err := ioutil.TempDir("", "dep") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - dirPath := filepath.Join(dir, "directory") - if err = os.MkdirAll(dirPath, 0777); err != nil { - t.Fatal(err) - } - - filePath := filepath.Join(dir, "file") - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - dirSymlink := filepath.Join(dir, "dirSymlink") - fileSymlink := filepath.Join(dir, "fileSymlink") - - if err = os.Symlink(dirPath, dirSymlink); err != nil { - t.Fatal(err) - } - if err = os.Symlink(filePath, fileSymlink); err != nil { - t.Fatal(err) - } - - var ( - inaccessibleFile string - inaccessibleSymlink string - ) - - cleanup := setupInaccessibleDir(t, func(dir string) error { - inaccessibleFile = filepath.Join(dir, "file") - if fh, err := os.Create(inaccessibleFile); err != nil { - return err - } else if err = fh.Close(); err != nil { - return err - } - - inaccessibleSymlink = filepath.Join(dir, "symlink") - return os.Symlink(inaccessibleFile, inaccessibleSymlink) - }) - defer cleanup() - - tests := map[string]struct{ expected, err bool }{ - dirPath: {false, false}, - filePath: {false, false}, - dirSymlink: {true, false}, - fileSymlink: {true, false}, - inaccessibleFile: {false, true}, - inaccessibleSymlink: {false, true}, - } - - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in Windows. Skipping - // these cases until a compatible implementation is provided. 
- delete(tests, inaccessibleFile) - delete(tests, inaccessibleSymlink) - } - - for path, want := range tests { - got, err := IsSymlink(path) - if err != nil { - if !want.err { - t.Errorf("expected no error, got %v", err) - } - } - - if got != want.expected { - t.Errorf("expected %t for %s, got %t", want.expected, path, got) - } - } -} - -func cleanUpDir(dir string) { - if runtime.GOOS == "windows" { - mu.Lock() - exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run() - mu.Unlock() - } - if dir != "" { - os.RemoveAll(dir) - } -} diff --git a/internal/fs/rename.go b/internal/fs/rename.go deleted file mode 100644 index a1b4a411d..000000000 --- a/internal/fs/rename.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } else if terr.Err != syscall.EXDEV { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/rename_windows.go b/internal/fs/rename_windows.go deleted file mode 100644 index 3b5650573..000000000 --- a/internal/fs/rename_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - if terr.Err != syscall.EXDEV { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr != 0x11 { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/testdata/symlinks/dir-symlink b/internal/fs/testdata/symlinks/dir-symlink deleted file mode 120000 index 777ebd014..000000000 --- a/internal/fs/testdata/symlinks/dir-symlink +++ /dev/null @@ -1 +0,0 @@ -../../testdata \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/file-symlink b/internal/fs/testdata/symlinks/file-symlink deleted file mode 120000 index 4c52274de..000000000 --- a/internal/fs/testdata/symlinks/file-symlink +++ /dev/null @@ -1 +0,0 @@ -../test.file \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/invalid-symlink b/internal/fs/testdata/symlinks/invalid-symlink deleted file mode 120000 index 0edf4f301..000000000 --- a/internal/fs/testdata/symlinks/invalid-symlink +++ /dev/null @@ -1 +0,0 @@ -/non/existing/file \ No newline at end of file diff --git 
a/internal/fs/testdata/symlinks/windows-file-symlink b/internal/fs/testdata/symlinks/windows-file-symlink deleted file mode 120000 index af1d6c8f5..000000000 --- a/internal/fs/testdata/symlinks/windows-file-symlink +++ /dev/null @@ -1 +0,0 @@ -C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file \ No newline at end of file diff --git a/internal/helm/chart.go b/internal/helm/chart.go deleted file mode 100644 index 6630f4f74..000000000 --- a/internal/helm/chart.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "fmt" - "reflect" - - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chartutil" -) - -// OverwriteChartDefaultValues overwrites the chart default values file with the -// given data. 
-func OverwriteChartDefaultValues(chart *helmchart.Chart, data []byte) (bool, error) { - // Read override values file data - values, err := chartutil.ReadValues(data) - if err != nil { - return false, fmt.Errorf("failed to parse provided override values file data") - } - - // Replace current values file in Raw field - for _, f := range chart.Raw { - if f.Name == chartutil.ValuesfileName { - // Do nothing if contents are equal - if reflect.DeepEqual(f.Data, data) { - return false, nil - } - - // Replace in Files field - for _, f := range chart.Files { - if f.Name == chartutil.ValuesfileName { - f.Data = data - } - } - - f.Data = data - chart.Values = values - return true, nil - } - } - - // This should never happen, helm charts must have a values.yaml file to be valid - return false, fmt.Errorf("failed to locate values file: %s", chartutil.ValuesfileName) -} diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go new file mode 100644 index 000000000..6ac896e78 --- /dev/null +++ b/internal/helm/chart/builder.go @@ -0,0 +1,226 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + sourcefs "github.com/fluxcd/pkg/oci" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + + "github.com/fluxcd/source-controller/internal/oci" +) + +// Reference holds information to locate a chart. 
+type Reference interface { + // Validate returns an error if the Reference is not valid according + // to the spec of the interface implementation. + Validate() error +} + +// LocalReference contains sufficient information to locate a chart on the +// local filesystem. +type LocalReference struct { + // WorkDir used as chroot during build operations. + // File references are not allowed to traverse outside it. + WorkDir string + // Path of the chart on the local filesystem relative to WorkDir. + Path string +} + +// Validate returns an error if the LocalReference does not have +// a Path set. +func (r LocalReference) Validate() error { + if r.WorkDir == "" { + return fmt.Errorf("no work dir set for local chart reference") + } + if r.Path == "" { + return fmt.Errorf("no path set for local chart reference") + } + if !filepath.IsAbs(r.WorkDir) { + return fmt.Errorf("local chart reference work dir is expected to be absolute") + } + if filepath.IsAbs(r.Path) { + return fmt.Errorf("local chart reference path is expected to be relative") + } + return nil +} + +// RemoteReference contains sufficient information to look up a chart in +// a ChartRepository. +type RemoteReference struct { + // Name of the chart. + Name string + // Version of the chart. + // Can be a Semver range, or empty for latest. + Version string +} + +// Validate returns an error if the RemoteReference does not have +// a Name set. +func (r RemoteReference) Validate() error { + if r.Name == "" { + return fmt.Errorf("no name set for remote chart reference") + } + name := regexp.MustCompile(`^([-a-z0-9]+/?\.?)+$`) + if !name.MatchString(r.Name) { + return fmt.Errorf("invalid chart name '%s': a valid name must be lower case letters and numbers and MAY be separated with dashes (-), slashes (/) or periods (.)", r.Name) + } + return nil +} + +// Builder is capable of building a (specific) chart Reference. 
+type Builder interface { + // Build pulls and (optionally) packages a Helm chart with the given + // Reference and BuildOptions, and writes it to p. + // It returns the Build result, or an error. + // It may return an error for unsupported Reference implementations. + Build(ctx context.Context, ref Reference, p string, opts BuildOptions) (*Build, error) +} + +// BuildOptions provides a list of options for Builder.Build. +type BuildOptions struct { + // VersionMetadata can be set to SemVer build metadata as defined in + // the spec, and is included during packaging. + // Ref: https://semver.org/#spec-item-10 + VersionMetadata string + // ValuesFiles can be set to a list of relative paths, used to compose + // and overwrite an alternative default "values.yaml" for the chart. + ValuesFiles []string + // CachedChartValuesFiles is a list of relative paths that were used to + // build the cached chart. + CachedChartValuesFiles []string + // IgnoreMissingValuesFiles controls whether to silently ignore missing + // values files rather than failing. + IgnoreMissingValuesFiles bool + // CachedChart can be set to the absolute path of a chart stored on + // the local filesystem, and is used for simple validation by metadata + // comparisons. + CachedChart string + // Force can be set to force the build of the chart, for example + // because the list of ValuesFiles has changed. + Force bool + // Verify can be set to enable verification of the chart. + Verify bool +} + +// GetValuesFiles returns BuildOptions.ValuesFiles, except if it equals +// "values.yaml", which returns nil. +func (o BuildOptions) GetValuesFiles() []string { + if len(o.ValuesFiles) == 1 && filepath.Clean(o.ValuesFiles[0]) == filepath.Clean(chartutil.ValuesfileName) { + return nil + } + return o.ValuesFiles +} + +// Build contains the (partial) Builder.Build result, including specific +// information about the built chart like ResolvedDependencies. +type Build struct { + // Name of the chart.
+ Name string + // Version of the chart. + Version string + // Path is the absolute path to the packaged chart. + // Can be empty, in which case a failure should be assumed. + Path string + // ValuesFiles is the list of files used to compose the chart's + // default "values.yaml". + ValuesFiles []string + // ResolvedDependencies is the number of local and remote dependencies + // collected by the DependencyManager before building the chart. + ResolvedDependencies int + // Packaged indicates if the Builder has packaged the chart. + // This can for example be false if ValuesFiles is empty and the chart + // source was already packaged. + Packaged bool + // VerifiedResult indicates the results of verifying the chart. + // If no verification was performed, this field should be VerificationResultIgnored. + VerifiedResult oci.VerificationResult +} + +// Summary returns a human-readable summary of the Build. +func (b *Build) Summary() string { + if !b.HasMetadata() { + return "no chart build" + } + + var s strings.Builder + + var action = "new" + if b.Path != "" { + action = "pulled" + if b.Packaged { + action = "packaged" + } + } + s.WriteString(fmt.Sprintf("%s '%s' chart with version '%s'", action, b.Name, b.Version)) + + if len(b.ValuesFiles) > 0 { + s.WriteString(fmt.Sprintf(" and merged values files %v", b.ValuesFiles)) + } + + return s.String() +} + +// HasMetadata returns if the Build contains chart metadata. +// +// NOTE: This may return True while the build did not Complete successfully. +// Which means it was able to successfully collect the metadata from the chart, +// but failed further into the process. +func (b *Build) HasMetadata() bool { + if b == nil { + return false + } + return b.Name != "" && b.Version != "" +} + +// Complete returns if the Build completed successfully. +func (b *Build) Complete() bool { + return b.HasMetadata() && b.Path != "" +} + +// String returns the Path of the Build. 
+func (b *Build) String() string { + if b == nil { + return "" + } + return b.Path +} + +// packageToPath attempts to package the given chart to the out filepath. +func packageToPath(chart *helmchart.Chart, out string) error { + o, err := os.MkdirTemp("", "chart-build-*") + if err != nil { + return fmt.Errorf("failed to create temporary directory for chart: %w", err) + } + defer os.RemoveAll(o) + + p, err := chartutil.Save(chart, o) + if err != nil { + return fmt.Errorf("failed to package chart: %w", err) + } + if err = sourcefs.RenameWithFallback(p, out); err != nil { + return fmt.Errorf("failed to write chart to file: %w", err) + } + return nil +} diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go new file mode 100644 index 000000000..44399a80a --- /dev/null +++ b/internal/helm/chart/builder_local.go @@ -0,0 +1,252 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/Masterminds/semver/v3" + securejoin "github.com/cyphar/filepath-securejoin" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/pkg/runtime/transform" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" +) + +type localChartBuilder struct { + dm *DependencyManager +} + +// NewLocalBuilder returns a Builder capable of building a Helm chart with a +// LocalReference. 
For chart references pointing to a directory, the +// DependencyManager is used to resolve missing local and remote dependencies. +func NewLocalBuilder(dm *DependencyManager) Builder { + return &localChartBuilder{ + dm: dm, + } +} + +// Build attempts to build a Helm chart with the given LocalReference and +// BuildOptions, writing it to p. +// It returns a Build describing the produced (or from cache observed) chart +// written to p, or a BuildError. +// +// The chart is loaded from the LocalReference.Path, and only packaged if the +// version (including BuildOptions.VersionMetadata modifications) differs from +// the current BuildOptions.CachedChart. +// +// BuildOptions.ValuesFiles changes are in this case not taken into account, +// and BuildOptions.Force should be used to enforce a rebuild. +// +// If the LocalReference.Path refers to an already packaged chart, and no +// packaging is required due to BuildOptions modifying the chart, +// LocalReference.Path is copied to p. +// +// If the LocalReference.Path refers to a chart directory, dependencies are +// confirmed to be present using the DependencyManager, while attempting to +// resolve any missing. 
+func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, opts BuildOptions) (*Build, error) { + localRef, ok := ref.(LocalReference) + if !ok { + err := fmt.Errorf("expected local chart reference") + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + + if err := ref.Validate(); err != nil { + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + + // Load the chart metadata from the LocalReference to ensure it points + // to a chart + securePath, err := securejoin.SecureJoin(localRef.WorkDir, localRef.Path) + if err != nil { + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + curMeta, err := LoadChartMetadata(securePath) + if err != nil { + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + if err = curMeta.Validate(); err != nil { + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + + result := &Build{} + result.Name = curMeta.Name + + // Set build specific metadata if instructed + result.Version = curMeta.Version + if opts.VersionMetadata != "" { + ver, err := semver.NewVersion(curMeta.Version) + if err != nil { + err = fmt.Errorf("failed to parse version from chart metadata as SemVer: %w", err) + return nil, &BuildError{Reason: ErrChartMetadataPatch, Err: err} + } + if *ver, err = ver.SetMetadata(opts.VersionMetadata); err != nil { + err = fmt.Errorf("failed to set SemVer metadata on chart version: %w", err) + return nil, &BuildError{Reason: ErrChartMetadataPatch, Err: err} + } + result.Version = ver.String() + } + + isChartDir := pathIsDir(securePath) + requiresPackaging := isChartDir || opts.VersionMetadata != "" || len(opts.GetValuesFiles()) != 0 + + // If all the following is true, we do not need to package the chart: + // - Chart name from cached chart matches resolved name + // - Chart version from cached chart matches calculated version + // - BuildOptions.Force is False + if opts.CachedChart != "" && !opts.Force { + if curMeta, err = 
LoadChartMetadataFromArchive(opts.CachedChart); err == nil { + // If the cached metadata is corrupt, we ignore its existence + // and continue the build + if err = curMeta.Validate(); err == nil { + if result.Name == curMeta.Name && result.Version == curMeta.Version { + result.Path = opts.CachedChart + result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. + result.ValuesFiles = opts.CachedChartValuesFiles + } + result.Packaged = requiresPackaging + + return result, nil + } + } + } + } + + // If the chart at the path is already packaged and no custom values files + // options are set, we can copy the chart without making modifications + if !requiresPackaging { + if err = copyFileToPath(securePath, p); err != nil { + return result, &BuildError{Reason: ErrChartPull, Err: err} + } + result.Path = p + return result, nil + } + + // Merge chart values, if instructed + var ( + mergedValues map[string]interface{} + valuesFiles []string + ) + if len(opts.GetValuesFiles()) > 0 { + if mergedValues, valuesFiles, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles, opts.IgnoreMissingValuesFiles); err != nil { + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + } + } + + // At this point we are certain we need to load the chart; + // either to package it because it originates from a directory, + // or because we have merged values and need to repackage + loadedChart, err := secureloader.Load(localRef.WorkDir, localRef.Path) + if err != nil { + return result, &BuildError{Reason: ErrChartPackage, Err: err} + } + + // Set earlier resolved version (with metadata) + loadedChart.Metadata.Version = result.Version + + // Overwrite default values with merged values, if any + if ok, err = OverwriteChartDefaultValues(loadedChart, mergedValues); ok || err != nil { + if err != nil { + return result, &BuildError{Reason: 
ErrValuesFilesMerge, Err: err} + } + result.ValuesFiles = valuesFiles + } + + // Ensure dependencies are fetched if building from a directory + if isChartDir { + if b.dm == nil { + err = fmt.Errorf("local chart builder requires dependency manager for unpackaged charts") + return result, &BuildError{Reason: ErrDependencyBuild, Err: err} + } + if result.ResolvedDependencies, err = b.dm.Build(ctx, ref, loadedChart); err != nil { + return result, &BuildError{Reason: ErrDependencyBuild, Err: err} + } + } + + // Package the chart + if err = packageToPath(loadedChart, p); err != nil { + return result, &BuildError{Reason: ErrChartPackage, Err: err} + } + result.Path = p + result.Packaged = requiresPackaging + return result, nil +} + +// mergeFileValues merges the given value file paths into a single "values.yaml" map. +// The provided (relative) paths may not traverse outside baseDir. By default, a missing +// file is considered an error. If ignoreMissing is true, missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. 
+func mergeFileValues(baseDir string, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { + mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) + for _, p := range paths { + secureP, err := securejoin.SecureJoin(baseDir, p) + if err != nil { + return nil, nil, err + } + f, err := os.Stat(secureP) + switch { + case err != nil: + if ignoreMissing && os.IsNotExist(err) { + continue + } + fallthrough + case !f.Mode().IsRegular(): + return nil, nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", + strings.TrimPrefix(secureP, baseDir), p) + } + b, err := os.ReadFile(secureP) + if err != nil { + return nil, nil, fmt.Errorf("could not read values from file '%s': %w", p, err) + } + values := make(map[string]interface{}) + err = yaml.Unmarshal(b, &values) + if err != nil { + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + } + mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) + } + return mergedValues, valuesFiles, nil +} + +// copyFileToPath attempts to copy in to out. If out already exists, it is truncated.
+func copyFileToPath(in, out string) error { + o, err := os.Create(out) + if err != nil { + return fmt.Errorf("failed to create copy target: %w", err) + } + defer o.Close() + i, err := os.Open(in) + if err != nil { + return fmt.Errorf("failed to open file to copy from: %w", err) + } + defer i.Close() + if _, err := o.ReadFrom(i); err != nil { + return fmt.Errorf("failed to read from source during copy: %w", err) + } + return nil +} diff --git a/internal/helm/chart/builder_local_test.go b/internal/helm/chart/builder_local_test.go new file mode 100644 index 000000000..4b26e1419 --- /dev/null +++ b/internal/helm/chart/builder_local_test.go @@ -0,0 +1,413 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + . "github.com/onsi/gomega" + "github.com/otiai10/copy" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/repository" +) + +func TestLocalBuilder_Build(t *testing.T) { + g := NewWithT(t) + + // Prepare chart repositories to be used for charts with remote dependency. 
+ chartB, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartB).ToNot(BeEmpty()) + mockRepo := func() *repository.ChartRepository { + return &repository.ChartRepository{ + Client: &mockGetter{ + Response: chartB, + }, + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + "grafana": { + &repo.ChartVersion{ + Metadata: &helmchart.Metadata{ + Name: "grafana", + Version: "6.17.4", + }, + URLs: []string{"https://example.com/grafana.tgz"}, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + } + } + + tests := []struct { + name string + reference Reference + buildOpts BuildOptions + valuesFiles []helmchart.File + repositories map[string]repository.Downloader + dependentChartPaths []string + wantValues chartutil.Values + wantVersion string + wantPackaged bool + wantErr string + }{ + { + name: "invalid reference", + reference: RemoteReference{}, + wantErr: "expected local chart reference", + }, + { + name: "invalid local reference - no path", + reference: LocalReference{}, + wantErr: "no path set for local chart reference", + }, + { + name: "invalid local reference - no file", + reference: LocalReference{WorkDir: "/tmp", Path: "non-existent-path.xyz"}, + wantErr: "no such file or directory", + }, + { + name: "invalid version metadata", + reference: LocalReference{Path: "../testdata/charts/helmchart"}, + buildOpts: BuildOptions{VersionMetadata: "^"}, + wantErr: "invalid metadata string", + }, + { + name: "with version metadata", + reference: LocalReference{Path: "../testdata/charts/helmchart"}, + buildOpts: BuildOptions{VersionMetadata: "foo"}, + wantVersion: "0.1.0+foo", + wantPackaged: true, + }, + { + name: "already packaged chart", + reference: LocalReference{Path: "../testdata/charts/helmchart-0.1.0.tgz"}, + wantVersion: "0.1.0", + wantPackaged: false, + }, + { + name: "default values", + reference: LocalReference{Path: "../testdata/charts/helmchart"}, + wantValues: chartutil.Values{ + 
"replicaCount": float64(1), + }, + wantVersion: "0.1.0", + wantPackaged: true, + }, + { + name: "with values files", + reference: LocalReference{Path: "../testdata/charts/helmchart"}, + buildOpts: BuildOptions{ + ValuesFiles: []string{"custom-values1.yaml", "custom-values2.yaml"}, + }, + valuesFiles: []helmchart.File{ + { + Name: "custom-values1.yaml", + Data: []byte(`replicaCount: 11 +nameOverride: "foo-name-override"`), + }, + { + Name: "custom-values2.yaml", + Data: []byte(`replicaCount: 20 +fullnameOverride: "full-foo-name-override"`), + }, + }, + wantValues: chartutil.Values{ + "replicaCount": float64(20), + "nameOverride": "foo-name-override", + "fullnameOverride": "full-foo-name-override", + }, + wantVersion: "0.1.0", + wantPackaged: true, + }, + { + name: "chart with dependencies", + reference: LocalReference{Path: "../testdata/charts/helmchartwithdeps"}, + repositories: map[string]repository.Downloader{ + "https://grafana.github.io/helm-charts/": mockRepo(), + }, + dependentChartPaths: []string{"./../testdata/charts/helmchart"}, + wantVersion: "0.1.0", + wantPackaged: true, + }, + { + name: "v1 chart", + reference: LocalReference{Path: "./../testdata/charts/helmchart-v1"}, + wantValues: chartutil.Values{ + "replicaCount": float64(1), + }, + wantVersion: "0.2.0", + wantPackaged: true, + }, + { + name: "v1 chart with dependencies", + reference: LocalReference{Path: "../testdata/charts/helmchartwithdeps-v1"}, + repositories: map[string]repository.Downloader{ + "https://grafana.github.io/helm-charts/": mockRepo(), + }, + dependentChartPaths: []string{"../testdata/charts/helmchart-v1"}, + wantVersion: "0.3.0", + wantPackaged: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workDir := t.TempDir() + + // Only if the reference is a LocalReference, set the WorkDir. 
+ localRef, ok := tt.reference.(LocalReference) + if ok { + // If the source chart path is valid, copy it into the workdir + // and update the localRef.Path with the copied local chart + // path. + if localRef.Path != "" { + _, err := os.Lstat(localRef.Path) + if err == nil { + helmchartDir := filepath.Join(workDir, "testdata", "charts", filepath.Base(localRef.Path)) + g.Expect(copy.Copy(localRef.Path, helmchartDir)).ToNot(HaveOccurred()) + } + } + localRef.WorkDir = workDir + tt.reference = localRef + } + + // Write value file in the base dir. + for _, f := range tt.valuesFiles { + vPath := filepath.Join(localRef.WorkDir, f.Name) + g.Expect(os.WriteFile(vPath, f.Data, 0o640)).ToNot(HaveOccurred()) + } + + // Write chart dependencies in the base dir. + for _, dcp := range tt.dependentChartPaths { + // Construct the chart path relative to the testdata chart. + helmchartDir := filepath.Join(workDir, "testdata", "charts", filepath.Base(dcp)) + g.Expect(copy.Copy(dcp, helmchartDir)).ToNot(HaveOccurred()) + } + + // Target path with name similar to the workDir. + targetPath := workDir + ".tgz" + + dm := NewDependencyManager( + WithRepositories(tt.repositories), + ) + + b := NewLocalBuilder(dm) + cb, err := b.Build(context.TODO(), tt.reference, targetPath, tt.buildOpts) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(cb).To(BeZero()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Packaged).To(Equal(tt.wantPackaged), "unexpected Build.Packaged value") + g.Expect(cb.Path).ToNot(BeEmpty(), "empty Build.Path") + + // Load the resulting chart and verify the values. 
+ resultChart, err := secureloader.LoadFile(cb.Path) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resultChart.Metadata.Version).To(Equal(tt.wantVersion)) + + for k, v := range tt.wantValues { + g.Expect(v).To(Equal(resultChart.Values[k])) + } + }) + } +} + +func TestLocalBuilder_Build_CachedChart(t *testing.T) { + g := NewWithT(t) + + workDir := t.TempDir() + + testChartPath := "./../testdata/charts/helmchart" + + dm := NewDependencyManager() + b := NewLocalBuilder(dm) + + tmpDir := t.TempDir() + + // Copy the source chart into the workdir. + g.Expect(copy.Copy(testChartPath, filepath.Join(workDir, "testdata", "charts", filepath.Base("helmchart")))).ToNot(HaveOccurred()) + + reference := LocalReference{WorkDir: workDir, Path: testChartPath} + + // Build first time. + targetPath := filepath.Join(tmpDir, "chart1.tgz") + buildOpts := BuildOptions{} + cb, err := b.Build(context.TODO(), reference, targetPath, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + + // Set the result as the CachedChart for second build. + buildOpts.CachedChart = cb.Path + + targetPath2 := filepath.Join(tmpDir, "chart2.tgz") + cb, err = b.Build(context.TODO(), reference, targetPath2, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Path).To(Equal(targetPath)) + + // Rebuild with build option Force. 
+ buildOpts.Force = true + cb, err = b.Build(context.TODO(), reference, targetPath2, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Path).To(Equal(targetPath2)) +} + +func Test_mergeFileValues(t *testing.T) { + tests := []struct { + name string + files []*helmchart.File + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string + }{ + { + name: "merges values from files", + files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + {Name: "b.yaml", Data: []byte("b: c")}, + {Name: "c.yaml", Data: []byte("b: d")}, + }, + paths: []string{"a.yaml", "b.yaml", "c.yaml"}, + wantValues: map[string]interface{}{ + "a": "b", + "b": "d", + }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, + }, + { + name: "illegal traverse", + paths: []string{"../../../traversing/illegally/a/p/a/b"}, + wantErr: "no values file found at path '/traversing/illegally/a/p/a/b'", + }, + { + name: "unmarshal error", + files: []*helmchart.File{ + {Name: "invalid", Data: []byte("abcd")}, + }, + paths: []string{"invalid"}, + wantErr: "unmarshaling values from 'invalid' failed", + }, + { + name: "error on invalid path", + paths: []string{"a.yaml"}, + wantErr: "no values file found at path '/a.yaml'", + }, + { + name: "ignore missing files", + files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "all files missing", + paths: []string{"a.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{}, + wantFiles: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + baseDir := t.TempDir() + + for _, f := range tt.files { + g.Expect(os.WriteFile(filepath.Join(baseDir, f.Name), f.Data, 0o640)).To(Succeed()) + } + + gotValues, gotFiles, err := mergeFileValues(baseDir, tt.paths, 
tt.ignoreMissing) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) + }) + } +} + +func Test_copyFileToPath(t *testing.T) { + tests := []struct { + name string + in string + wantErr string + }{ + { + name: "copies input file", + in: "../testdata/local-index.yaml", + }, + { + name: "invalid input file", + in: "../testdata/invalid.tgz", + wantErr: "failed to open file to copy from", + }, + { + name: "invalid input directory", + in: "../testdata/charts", + wantErr: "failed to read from source during copy", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + out := tmpFile("copy-0.1.0", ".tgz") + defer os.RemoveAll(out) + err := copyFileToPath(tt.in, out) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).To(BeARegularFile()) + f1, err := os.ReadFile(tt.in) + g.Expect(err).ToNot(HaveOccurred()) + f2, err := os.ReadFile(out) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f2).To(Equal(f1)) + }) + } +} diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go new file mode 100644 index 000000000..2cfdf81b4 --- /dev/null +++ b/internal/helm/chart/builder_remote.go @@ -0,0 +1,309 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/Masterminds/semver/v3" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/repo" + "sigs.k8s.io/yaml" + + sourcefs "github.com/fluxcd/pkg/oci" + "github.com/fluxcd/pkg/runtime/transform" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/repository" + "github.com/fluxcd/source-controller/internal/oci" +) + +type remoteChartBuilder struct { + remote repository.Downloader +} + +// NewRemoteBuilder returns a Builder capable of building a Helm +// chart with a RemoteReference in the given repository.Downloader. +func NewRemoteBuilder(repository repository.Downloader) Builder { + return &remoteChartBuilder{ + remote: repository, + } +} + +// Build attempts to build a Helm chart with the given RemoteReference and +// BuildOptions, writing it to p. +// It returns a Build describing the produced (or from cache observed) chart +// written to p, or a BuildError. +// +// The latest version for the RemoteReference.Version is determined in the +// repository.ChartRepository, only downloading it if the version (including +// BuildOptions.VersionMetadata) differs from the current BuildOptions.CachedChart. +// BuildOptions.ValuesFiles changes are in this case not taken into account, +// and BuildOptions.Force should be used to enforce a rebuild. 
+// + +// After downloading the chart, it is only packaged if required due to BuildOptions +// modifying the chart, otherwise the exact data as retrieved from the repository +// is written to p, after validating it to be a chart. +func (b *remoteChartBuilder) Build(ctx context.Context, ref Reference, p string, opts BuildOptions) (*Build, error) { + remoteRef, ok := ref.(RemoteReference) + if !ok { + err := fmt.Errorf("expected remote chart reference") + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + + if err := ref.Validate(); err != nil { + return nil, &BuildError{Reason: ErrChartReference, Err: err} + } + + res, result, err := b.downloadFromRepository(ctx, b.remote, remoteRef, opts) + if err != nil { + return nil, err + } + if res == nil { + return result, nil + } + + requiresPackaging := len(opts.GetValuesFiles()) != 0 || opts.VersionMetadata != "" + + // Use literal chart copy from remote if no custom values files options are + // set and version metadata isn't set.
+ if !requiresPackaging { + if err = validatePackageAndWriteToPath(res, p); err != nil { + return nil, &BuildError{Reason: ErrChartPull, Err: err} + } + result.Path = p + return result, nil + } + + // Load the chart and merge chart values + var chart *helmchart.Chart + if chart, err = secureloader.LoadArchive(res); err != nil { + err = fmt.Errorf("failed to load downloaded chart: %w", err) + return result, &BuildError{Reason: ErrChartPackage, Err: err} + } + chart.Metadata.Version = result.Version + + mergedValues, valuesFiles, err := mergeChartValues(chart, opts.ValuesFiles, opts.IgnoreMissingValuesFiles) + if err != nil { + err = fmt.Errorf("failed to merge chart values: %w", err) + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + } + // Overwrite default values with merged values, if any + if ok, err = OverwriteChartDefaultValues(chart, mergedValues); ok || err != nil { + if err != nil { + return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + } + result.ValuesFiles = valuesFiles + } + + // Package the chart with the custom values + if err = packageToPath(chart, p); err != nil { + return nil, &BuildError{Reason: ErrChartPackage, Err: err} + } + result.Path = p + result.Packaged = true + return result, nil +} + +func (b *remoteChartBuilder) downloadFromRepository(ctx context.Context, remote repository.Downloader, remoteRef RemoteReference, opts BuildOptions) (*bytes.Buffer, *Build, error) { + // Get the current version for the RemoteReference + cv, err := remote.GetChartVersion(remoteRef.Name, remoteRef.Version) + if err != nil { + var reason BuildErrorReason + switch err.(type) { + case *repository.ErrReference: + reason = ErrChartReference + case *repository.ErrExternal: + reason = ErrChartPull + default: + reason = ErrUnknown + } + err = fmt.Errorf("failed to get chart version for remote reference: %w", err) + return nil, nil, &BuildError{Reason: reason, Err: err} + } + + verifiedResult := oci.VerificationResultIgnored + + // 
Verify the chart if necessary + if opts.Verify { + if verifiedResult, err = remote.VerifyChart(ctx, cv); err != nil { + return nil, nil, &BuildError{Reason: ErrChartVerification, Err: err} + } + } + + result, shouldReturn, err := generateBuildResult(cv, opts) + if err != nil { + return nil, nil, err + } + + result.VerifiedResult = verifiedResult + + if shouldReturn { + return nil, result, nil + } + + // Download the package for the resolved version + res, err := remote.DownloadChart(cv) + if err != nil { + err = fmt.Errorf("failed to download chart for remote reference: %w", err) + return nil, nil, &BuildError{Reason: ErrChartPull, Err: err} + } + + return res, result, nil +} + +// generateBuildResult returns a Build object generated from the given chart version and build options. It also returns +// true if the given chart can be retrieved from cache and doesn't need to be downloaded again. +func generateBuildResult(cv *repo.ChartVersion, opts BuildOptions) (*Build, bool, error) { + result := &Build{} + result.Version = cv.Version + result.Name = cv.Name + result.VerifiedResult = oci.VerificationResultIgnored + + // Set build specific metadata if instructed + if opts.VersionMetadata != "" { + ver, err := setBuildMetaData(result.Version, opts.VersionMetadata) + if err != nil { + return nil, false, &BuildError{Reason: ErrChartMetadataPatch, Err: err} + } + result.Version = ver.String() + } + + requiresPackaging := len(opts.GetValuesFiles()) != 0 || opts.VersionMetadata != "" + + // If all the following is true, we do not need to download and/or build the chart: + // - Chart name from cached chart matches resolved name + // - Chart version from cached chart matches calculated version + // - BuildOptions.Force is False + if opts.CachedChart != "" && !opts.Force { + if curMeta, err := LoadChartMetadataFromArchive(opts.CachedChart); err == nil { + // If the cached metadata is corrupt, we ignore its existence + // and continue the build + if err = curMeta.Validate(); err 
== nil { + if result.Name == curMeta.Name && result.Version == curMeta.Version { + result.Path = opts.CachedChart + result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. + result.ValuesFiles = opts.CachedChartValuesFiles + } + result.Packaged = requiresPackaging + return result, true, nil + } + } + } + } + + return result, false, nil +} + +func setBuildMetaData(version, versionMetadata string) (*semver.Version, error) { + ver, err := semver.NewVersion(version) + if err != nil { + return nil, fmt.Errorf("failed to parse version from chart metadata as SemVer: %w", err) + } + if *ver, err = ver.SetMetadata(versionMetadata); err != nil { + return nil, fmt.Errorf("failed to set SemVer metadata on chart version: %w", err) + } + + return ver, nil +} + +// mergeChartValues merges the given chart.Chart Files paths into a single "values.yaml" map. +// By default, a missing file is considered an error. If ignoreMissing is set true, +// missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. 
+func mergeChartValues(chart *helmchart.Chart, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { + mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) + for _, p := range paths { + cfn := filepath.Clean(p) + if cfn == chartutil.ValuesfileName { + mergedValues = transform.MergeMaps(mergedValues, chart.Values) + valuesFiles = append(valuesFiles, p) + continue + } + var b []byte + for _, f := range chart.Files { + if f.Name == cfn { + b = f.Data + break + } + } + if b == nil { + if ignoreMissing { + continue + } + return nil, nil, fmt.Errorf("no values file found at path '%s'", p) + } + values := make(map[string]interface{}) + if err := yaml.Unmarshal(b, &values); err != nil { + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + } + mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) + } + return mergedValues, valuesFiles, nil +} + +// validatePackageAndWriteToPath atomically writes the packaged chart from reader +// to out while validating it by loading the chart metadata from the archive. 
+func validatePackageAndWriteToPath(reader io.Reader, out string) error { + tmpFile, err := os.CreateTemp("", filepath.Base(out)) + if err != nil { + return fmt.Errorf("failed to create temporary file for chart: %w", err) + } + defer os.Remove(tmpFile.Name()) + if _, err = tmpFile.ReadFrom(reader); err != nil { + _ = tmpFile.Close() + return fmt.Errorf("failed to write chart to file: %w", err) + } + if err = tmpFile.Close(); err != nil { + return err + } + meta, err := LoadChartMetadataFromArchive(tmpFile.Name()) + if err != nil { + return fmt.Errorf("failed to load chart metadata from written chart: %w", err) + } + if err = meta.Validate(); err != nil { + return fmt.Errorf("failed to validate metadata of written chart: %w", err) + } + if err = sourcefs.RenameWithFallback(tmpFile.Name(), out); err != nil { + return fmt.Errorf("failed to write chart to file: %w", err) + } + return nil +} + +// pathIsDir returns a boolean indicating if the given path points to a directory. +// In case os.Stat on the given path returns an error it returns false as well. +func pathIsDir(p string) bool { + if p == "" { + return false + } + if i, err := os.Stat(p); err != nil || !i.IsDir() { + return false + } + return true +} diff --git a/internal/helm/chart/builder_remote_test.go b/internal/helm/chart/builder_remote_test.go new file mode 100644 index 000000000..7994fa5ee --- /dev/null +++ b/internal/helm/chart/builder_remote_test.go @@ -0,0 +1,599 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "bytes" + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + . "github.com/onsi/gomega" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/repository" +) + +type mockRegistryClient struct { + tags map[string][]string + requestedURL string +} + +func (m *mockRegistryClient) Tags(url string) ([]string, error) { + m.requestedURL = url + if tags, ok := m.tags[url]; ok { + return tags, nil + } + return nil, fmt.Errorf("no tags found for %s", url) +} + +func (m *mockRegistryClient) Login(url string, opts ...registry.LoginOption) error { + m.requestedURL = url + return nil +} + +func (m *mockRegistryClient) Logout(url string, opts ...registry.LogoutOption) error { + m.requestedURL = url + return nil +} + +// mockIndexChartGetter returns specific response for index and chart queries. 
+type mockIndexChartGetter struct { + IndexResponse []byte + ChartResponse []byte + ErrorResponse error + requestedURL string +} + +func (g *mockIndexChartGetter) Get(u string, _ ...helmgetter.Option) (*bytes.Buffer, error) { + if g.ErrorResponse != nil { + return nil, g.ErrorResponse + } + g.requestedURL = u + r := g.ChartResponse + if strings.HasSuffix(u, "index.yaml") { + r = g.IndexResponse + } + return bytes.NewBuffer(r), nil +} + +func (g *mockIndexChartGetter) LastGet() string { + return g.requestedURL +} + +func TestRemoteBuilder__BuildFromChartRepository(t *testing.T) { + g := NewWithT(t) + + chartGrafana, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartGrafana).ToNot(BeEmpty()) + + index := []byte(` +apiVersion: v1 +entries: + grafana: + - urls: + - https://example.com/grafana.tgz + description: string + version: 6.17.4 + name: grafana +`) + + mockGetter := &mockIndexChartGetter{ + IndexResponse: index, + ChartResponse: chartGrafana, + } + + mockRepo := func() *repository.ChartRepository { + return &repository.ChartRepository{ + URL: "https://grafana.github.io/helm-charts/", + Client: mockGetter, + RWMutex: &sync.RWMutex{}, + } + } + + tests := []struct { + name string + reference Reference + buildOpts BuildOptions + repository *repository.ChartRepository + wantValues chartutil.Values + wantVersion string + wantPackaged bool + wantErr string + }{ + { + name: "invalid reference", + reference: LocalReference{}, + wantErr: "expected remote chart reference", + }, + { + name: "invalid reference - no name", + reference: RemoteReference{}, + wantErr: "no name set for remote chart reference", + }, + { + name: "chart not in repo", + reference: RemoteReference{Name: "foo"}, + repository: mockRepo(), + wantErr: "failed to get chart version for remote reference", + }, + { + name: "chart version not in repo", + reference: RemoteReference{Name: "grafana", Version: "1.1.1"}, + repository: mockRepo(), + 
wantErr: "failed to get chart version for remote reference", + }, + { + name: "invalid version metadata", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + buildOpts: BuildOptions{VersionMetadata: "^"}, + wantErr: "invalid metadata string", + }, + { + name: "with version metadata", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + buildOpts: BuildOptions{VersionMetadata: "foo"}, + wantVersion: "6.17.4+foo", + wantPackaged: true, + }, + { + name: "default values", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + wantVersion: "0.1.0", + wantValues: chartutil.Values{ + "replicaCount": float64(1), + }, + }, + { + name: "merge values", + reference: RemoteReference{Name: "grafana"}, + buildOpts: BuildOptions{ + ValuesFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, + }, + repository: mockRepo(), + wantVersion: "6.17.4", + wantValues: chartutil.Values{ + "a": "b", + "b": "d", + }, + wantPackaged: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + targetPath := filepath.Join(tmpDir, "chart.tgz") + + if tt.repository != nil { + g.Expect(tt.repository.CacheIndex()).ToNot(HaveOccurred()) + // Cleanup the cache index path. + defer os.Remove(tt.repository.Path) + } + + b := NewRemoteBuilder(tt.repository) + + cb, err := b.Build(context.TODO(), tt.reference, targetPath, tt.buildOpts) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(cb).To(BeZero()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Packaged).To(Equal(tt.wantPackaged), "unexpected Build.Packaged value") + g.Expect(cb.Path).ToNot(BeEmpty(), "empty Build.Path") + + // Load the resulting chart and verify the values. 
+ resultChart, err := secureloader.LoadFile(cb.Path) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resultChart.Metadata.Version).To(Equal(tt.wantVersion)) + + for k, v := range tt.wantValues { + g.Expect(v).To(Equal(resultChart.Values[k])) + } + }) + } +} + +func TestRemoteBuilder_BuildFromOCIChartRepository(t *testing.T) { + g := NewWithT(t) + + chartGrafana, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartGrafana).ToNot(BeEmpty()) + + registryClient := &mockRegistryClient{ + tags: map[string][]string{ + "localhost:5000/my_repo/grafana": {"6.17.4"}, + "localhost:5000/my_repo/another/grafana": {"6.17.4"}, + }, + } + + mockGetter := &mockIndexChartGetter{ + ChartResponse: chartGrafana, + } + + u, err := url.Parse("oci://localhost:5000/my_repo") + g.Expect(err).ToNot(HaveOccurred()) + + mockRepo := func() *repository.OCIChartRepository { + return &repository.OCIChartRepository{ + URL: *u, + Client: mockGetter, + RegistryClient: registryClient, + } + } + mockRepoWithoutChart := func() *repository.OCIChartRepository { + return &repository.OCIChartRepository{ + URL: *u, + Client: &mockIndexChartGetter{ + ErrorResponse: fmt.Errorf("chart doesn't exist"), + }, + RegistryClient: registryClient, + } + } + + tests := []struct { + name string + reference Reference + buildOpts BuildOptions + repository *repository.OCIChartRepository + wantValues chartutil.Values + wantVersion string + wantPackaged bool + wantErr string + }{ + { + name: "invalid reference", + reference: LocalReference{}, + wantErr: "expected remote chart reference", + }, + { + name: "invalid reference - no name", + reference: RemoteReference{}, + wantErr: "no name set for remote chart reference", + }, + { + name: "chart not in repository", + reference: RemoteReference{Name: "foo"}, + repository: mockRepo(), + wantErr: "failed to get chart version for remote reference", + }, + { + name: "chart version not in repository", + reference: 
RemoteReference{Name: "grafana", Version: "1.1.1"}, + repository: mockRepoWithoutChart(), + wantErr: "failed to download chart for remote reference: failed to get", + }, + { + name: "invalid version metadata", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + buildOpts: BuildOptions{VersionMetadata: "^"}, + wantErr: "invalid metadata string", + }, + { + name: "with version metadata", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + buildOpts: BuildOptions{VersionMetadata: "foo"}, + wantVersion: "6.17.4+foo", + wantPackaged: true, + }, + { + name: "default values", + reference: RemoteReference{Name: "grafana"}, + repository: mockRepo(), + wantVersion: "0.1.0", + wantValues: chartutil.Values{ + "replicaCount": float64(1), + }, + }, + { + name: "default values", + reference: RemoteReference{Name: "another/grafana"}, + repository: mockRepo(), + wantVersion: "0.1.0", + wantValues: chartutil.Values{ + "replicaCount": float64(1), + }, + }, + { + name: "merge values", + reference: RemoteReference{Name: "grafana"}, + buildOpts: BuildOptions{ + ValuesFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, + }, + repository: mockRepo(), + wantVersion: "6.17.4", + wantValues: chartutil.Values{ + "a": "b", + "b": "d", + }, + wantPackaged: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "remote-chart-builder-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + targetPath := filepath.Join(tmpDir, "chart.tgz") + + b := NewRemoteBuilder(tt.repository) + + cb, err := b.Build(context.TODO(), tt.reference, targetPath, tt.buildOpts) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred(), "expected error '%s'", tt.wantErr) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(cb).To(BeZero()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Packaged).To(Equal(tt.wantPackaged), "unexpected Build.Packaged 
value") + g.Expect(cb.Path).ToNot(BeEmpty(), "empty Build.Path") + + // Load the resulting chart and verify the values. + resultChart, err := secureloader.LoadFile(cb.Path) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(resultChart.Metadata.Version).To(Equal(tt.wantVersion)) + + for k, v := range tt.wantValues { + g.Expect(v).To(Equal(resultChart.Values[k])) + } + }) + } +} + +func TestRemoteBuilder_Build_CachedChart(t *testing.T) { + g := NewWithT(t) + + chartGrafana, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartGrafana).ToNot(BeEmpty()) + + index := []byte(` +apiVersion: v1 +entries: + helmchart: + - urls: + - https://example.com/helmchart-0.1.0.tgz + description: string + version: 0.1.0 + name: helmchart +`) + + mockGetter := &mockIndexChartGetter{ + IndexResponse: index, + ChartResponse: chartGrafana, + } + mockRepo := func() *repository.ChartRepository { + return &repository.ChartRepository{ + URL: "https://grafana.github.io/helm-charts/", + Client: mockGetter, + RWMutex: &sync.RWMutex{}, + } + } + + reference := RemoteReference{Name: "helmchart"} + repository := mockRepo() + + err = repository.CacheIndex() + g.Expect(err).ToNot(HaveOccurred()) + // Cleanup the cache index path. + defer os.Remove(repository.Path) + + b := NewRemoteBuilder(repository) + + tmpDir := t.TempDir() + + // Build first time. + targetPath := filepath.Join(tmpDir, "chart1.tgz") + buildOpts := BuildOptions{} + cb, err := b.Build(context.TODO(), reference, targetPath, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + + // Set the result as the CachedChart for second build. + buildOpts.CachedChart = cb.Path + + // Rebuild with a new path. + targetPath2 := filepath.Join(tmpDir, "chart2.tgz") + cb, err = b.Build(context.TODO(), reference, targetPath2, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Path).To(Equal(targetPath)) + + // Rebuild with build option Force. 
+ buildOpts.Force = true + cb, err = b.Build(context.TODO(), reference, targetPath2, buildOpts) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cb.Path).To(Equal(targetPath2)) +} + +func Test_mergeChartValues(t *testing.T) { + tests := []struct { + name string + chart *helmchart.Chart + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string + }{ + { + name: "merges values", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + {Name: "b.yaml", Data: []byte("b: c")}, + {Name: "c.yaml", Data: []byte("b: d")}, + }, + }, + paths: []string{"a.yaml", "b.yaml", "c.yaml"}, + wantValues: map[string]interface{}{ + "a": "b", + "b": "d", + }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, + }, + { + name: "uses chart values", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "c.yaml", Data: []byte("b: d")}, + }, + Values: map[string]interface{}{ + "a": "b", + }, + }, + paths: []string{chartutil.ValuesfileName, "c.yaml"}, + wantValues: map[string]interface{}{ + "a": "b", + "b": "d", + }, + wantFiles: []string{chartutil.ValuesfileName, "c.yaml"}, + }, + { + name: "unmarshal error", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "invalid", Data: []byte("abcd")}, + }, + }, + paths: []string{"invalid"}, + wantErr: "unmarshaling values from 'invalid' failed", + }, + { + name: "error on invalid path", + chart: &helmchart.Chart{}, + paths: []string{"a.yaml"}, + wantErr: "no values file found at path 'a.yaml'", + }, + { + name: "merges values ignoring file missing", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "merges values ignoring all missing", + chart: &helmchart.Chart{}, + paths: []string{"a.yaml"}, + ignoreMissing: 
true, + wantValues: map[string]interface{}{}, + wantFiles: []string{}, + }, + { + name: "uses chart values ignoring missing file", + chart: &helmchart.Chart{ + Values: map[string]interface{}{ + "a": "b", + }, + }, + paths: []string{chartutil.ValuesfileName, "c.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{chartutil.ValuesfileName}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + gotValues, gotFiles, err := mergeChartValues(tt.chart, tt.paths, tt.ignoreMissing) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) + }) + } +} + +func Test_validatePackageAndWriteToPath(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + validF, err := os.Open("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + defer validF.Close() + + chartPath := filepath.Join(tmpDir, "chart.tgz") + err = validatePackageAndWriteToPath(validF, chartPath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartPath).To(BeARegularFile()) + + emptyF, err := os.Open("./../testdata/charts/empty.tgz") + g.Expect(err).ToNot(HaveOccurred()) + defer emptyF.Close() + err = validatePackageAndWriteToPath(emptyF, filepath.Join(tmpDir, "out.tgz")) + g.Expect(err).To(HaveOccurred()) +} + +func Test_pathIsDir(t *testing.T) { + tests := []struct { + name string + p string + want bool + }{ + {name: "directory", p: "../testdata/", want: true}, + {name: "file", p: "../testdata/local-index.yaml", want: false}, + {name: "not found error", p: "../testdata/does-not-exist.yaml", want: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + 
g.Expect(pathIsDir(tt.p)).To(Equal(tt.want)) + }) + } +} diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go new file mode 100644 index 000000000..d3fa55e38 --- /dev/null +++ b/internal/helm/chart/builder_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "crypto/rand" + "encoding/hex" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/chartutil" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" +) + +func TestLocalReference_Validate(t *testing.T) { + tests := []struct { + name string + ref LocalReference + wantErr string + }{ + { + name: "ref with path and work dir", + ref: LocalReference{WorkDir: "/workdir/", Path: "./a/path"}, + }, + { + name: "ref without work dir", + ref: LocalReference{Path: "/a/path"}, + wantErr: "no work dir set for local chart reference", + }, + { + name: "ref with relative work dir", + ref: LocalReference{WorkDir: "../a/path", Path: "foo"}, + wantErr: "local chart reference work dir is expected to be absolute", + }, + { + name: "ref without path", + ref: LocalReference{WorkDir: "/just/a/workdir"}, + wantErr: "no path set for local chart reference", + }, + { + name: "ref with an absolute path", + ref: LocalReference{WorkDir: "/a/path", Path: "/foo"}, + wantErr: "local chart reference path is expected to be relative", + }, + } + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := tt.ref.Validate() + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestRemoteReference_Validate(t *testing.T) { + tests := []struct { + name string + ref RemoteReference + wantErr string + }{ + { + name: "ref with name", + ref: RemoteReference{Name: "valid-chart-name"}, + }, + { + name: "ref with single-character name", + ref: RemoteReference{Name: "a"}, + }, + { + name: "ref with invalid name", + ref: RemoteReference{Name: "iNvAlID-ChArT-NAmE!"}, + wantErr: "invalid chart name 'iNvAlID-ChArT-NAmE!'", + }, + { + name: "ref with Artifactory specific valid format", + ref: RemoteReference{Name: "i-shall/not"}, + }, + { + name: "ref without name", + ref: RemoteReference{}, + wantErr: "no name set for remote chart reference", + }, + { + name: "ref with only a slash", + ref: RemoteReference{Name: "/"}, + wantErr: "invalid chart name '/'", + }, + { + name: "ref with double slash", + ref: RemoteReference{Name: "not//a/valid/chart"}, + wantErr: "invalid chart name 'not//a/valid/chart'", + }, + { + name: "ref with period in name", + ref: RemoteReference{Name: "valid.chart.name"}, + }, + { + name: "ref with double period in name", + ref: RemoteReference{Name: "../valid-chart-name"}, + wantErr: "invalid chart name '../valid-chart-name", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := tt.ref.Validate() + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestBuildOptions_GetValuesFiles(t *testing.T) { + tests := []struct { + name string + valuesFiles []string + want []string + }{ + { + name: "Default values.yaml", + valuesFiles: []string{chartutil.ValuesfileName}, + want: 
nil, + }, + { + name: "Values files", + valuesFiles: []string{chartutil.ValuesfileName, "foo.yaml"}, + want: []string{chartutil.ValuesfileName, "foo.yaml"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + o := BuildOptions{ValuesFiles: tt.valuesFiles} + g.Expect(o.GetValuesFiles()).To(Equal(tt.want)) + }) + } +} + +func TestChartBuildResult_Summary(t *testing.T) { + tests := []struct { + name string + build *Build + want string + }{ + { + name: "Build with metadata", + build: &Build{ + Name: "chart", + Version: "1.2.3-rc.1+bd6bf40", + }, + want: "new 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + }, + { + name: "Pulled chart", + build: &Build{ + Name: "chart", + Version: "1.2.3-rc.1+bd6bf40", + Path: "chart.tgz", + }, + want: "pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + }, + { + name: "Packaged chart", + build: &Build{ + Name: "chart", + Version: "arbitrary-version", + Packaged: true, + ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", + }, + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", + }, + { + name: "With values files", + build: &Build{ + Name: "chart", + Version: "arbitrary-version", + Packaged: true, + ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", + }, + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", + }, + { + name: "Empty build", + build: &Build{}, + want: "no chart build", + }, + { + name: "Nil build", + build: nil, + want: "no chart build", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(tt.build.Summary()).To(Equal(tt.want)) + }) + } +} + +func TestChartBuildResult_String(t *testing.T) { + g := NewWithT(t) + + var result *Build + g.Expect(result.String()).To(Equal("")) + result = &Build{} + g.Expect(result.String()).To(Equal("")) + result = &Build{Path: "/foo/"} + 
g.Expect(result.String()).To(Equal("/foo/")) +} + +func Test_packageToPath(t *testing.T) { + g := NewWithT(t) + + chart, err := secureloader.LoadFile("../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chart).ToNot(BeNil()) + + out := tmpFile("chart-0.1.0", ".tgz") + defer os.RemoveAll(out) + err = packageToPath(chart, out) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).To(BeARegularFile()) + _, err = secureloader.LoadFile(out) + g.Expect(err).ToNot(HaveOccurred()) +} + +func tmpFile(prefix, suffix string) string { + randBytes := make([]byte, 16) + rand.Read(randBytes) + return filepath.Join(os.TempDir(), prefix+hex.EncodeToString(randBytes)+suffix) +} diff --git a/internal/helm/chart/dependency_manager.go b/internal/helm/chart/dependency_manager.go new file mode 100644 index 000000000..8a3f0ccfb --- /dev/null +++ b/internal/helm/chart/dependency_manager.go @@ -0,0 +1,351 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chart + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" + securejoin "github.com/cyphar/filepath-securejoin" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" + helmchart "helm.sh/helm/v3/pkg/chart" + "k8s.io/apimachinery/pkg/util/errors" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/repository" +) + +// GetChartDownloaderCallback must return a Downloader for the +// URL or an error describing why it could not be returned. +type GetChartDownloaderCallback func(url string) (repository.Downloader, error) + +// DependencyManager manages dependencies for a Helm chart. +type DependencyManager struct { + // downloaders contains a map of Downloader objects + // indexed by their repository.NormalizeURL. + // It is consulted as a lookup table for missing dependencies, based on + // the (repository) URL the dependency refers to. + downloaders map[string]repository.Downloader + + // getChartDownloaderCallback can be set to an on-demand GetChartDownloaderCallback + // whose returned result is cached to downloaders. + getChartDownloaderCallback GetChartDownloaderCallback + + // concurrent is the number of concurrent chart-add operations during + // Build. Defaults to 1 (non-concurrent). + concurrent int64 + + // mu contains the lock for chart writes. + mu sync.Mutex +} + +// DependencyManagerOption configures an option on a DependencyManager. 
type DependencyManagerOption interface {
	applyToDependencyManager(dm *DependencyManager)
}

// WithRepositories configures the DependencyManager with the given lookup
// table of Downloader objects, indexed by their normalized repository URL.
type WithRepositories map[string]repository.Downloader

func (o WithRepositories) applyToDependencyManager(dm *DependencyManager) {
	dm.downloaders = o
}

// WithDownloaderCallback configures the DependencyManager with the given
// GetChartDownloaderCallback, consulted on demand for repository URLs not
// present in the downloaders lookup table.
type WithDownloaderCallback GetChartDownloaderCallback

func (o WithDownloaderCallback) applyToDependencyManager(dm *DependencyManager) {
	dm.getChartDownloaderCallback = GetChartDownloaderCallback(o)
}

// WithConcurrent configures the number of concurrent chart-add operations
// the DependencyManager performs during Build.
type WithConcurrent int64

func (o WithConcurrent) applyToDependencyManager(dm *DependencyManager) {
	dm.concurrent = int64(o)
}

// NewDependencyManager returns a new DependencyManager configured with the given
// DependencyManagerOption list.
func NewDependencyManager(opts ...DependencyManagerOption) *DependencyManager {
	dm := &DependencyManager{}
	for _, v := range opts {
		v.applyToDependencyManager(dm)
	}
	return dm
}

// Clear iterates over the downloaders, calling Clear on all
// items. It returns an aggregate error of all Clear errors.
func (dm *DependencyManager) Clear() error {
	var errs []error
	// nil entries are tolerated so a partially-populated lookup table
	// (e.g. from WithRepositories in tests) does not panic.
	for _, v := range dm.downloaders {
		if v != nil {
			errs = append(errs, v.Clear())
		}
	}
	return errors.NewAggregate(errs)
}

// Build compiles a set of missing dependencies from chart.Chart, and attempts to
// resolve and build them using the information from Reference.
// It returns the number of resolved local and remote dependencies, or an error.
+func (dm *DependencyManager) Build(ctx context.Context, ref Reference, chart *helmchart.Chart) (int, error) { + // Collect dependency metadata + var ( + deps = chart.Dependencies() + reqs = chart.Metadata.Dependencies + ) + // Lock file takes precedence + if lock := chart.Lock; lock != nil { + reqs = lock.Dependencies + } + + // Collect missing dependencies + missing := collectMissing(deps, reqs) + if len(missing) == 0 { + return 0, nil + } + + // Run the build for the missing dependencies + if err := dm.build(ctx, ref, chart, missing); err != nil { + return 0, err + } + return len(missing), nil +} + +// chartWithLock holds a chart.Chart with a sync.Mutex to lock for writes. +type chartWithLock struct { + *helmchart.Chart + mu sync.Mutex +} + +// build adds the given list of deps to the chart with the configured number of +// concurrent workers. If the chart.Chart references a local dependency but no +// LocalReference is given, or any dependency could not be added, an error +// is returned. The first error it encounters cancels all other workers. 
// build adds the given deps to the chart using at most dm.concurrent workers,
// bounded by a weighted semaphore inside an errgroup. The first worker error
// cancels the group context and is returned by group.Wait.
func (dm *DependencyManager) build(ctx context.Context, ref Reference, c *helmchart.Chart, deps map[string]*helmchart.Dependency) error {
	// Default to non-concurrent (single worker) operation.
	current := dm.concurrent
	if current <= 0 {
		current = 1
	}

	group, groupCtx := errgroup.WithContext(ctx)
	// The dispatch loop itself runs in the group so that a semaphore
	// acquisition aborted by group cancellation is surfaced via Wait.
	group.Go(func() error {
		sem := semaphore.NewWeighted(current)
		// Wrap the chart in a mutex so concurrent workers can safely
		// call AddDependency.
		c := &chartWithLock{Chart: c}
		for name, dep := range deps {
			// Shadow the loop variables for capture by the closure
			// (required for Go versions before 1.22).
			name, dep := name, dep
			// Block until a worker slot is free; returns an error if
			// groupCtx is cancelled by an earlier failure.
			if err := sem.Acquire(groupCtx, 1); err != nil {
				return err
			}
			group.Go(func() (err error) {
				defer sem.Release(1)
				if isLocalDep(dep) {
					// Local (file://) dependencies can only be resolved
					// relative to a local working directory.
					localRef, ok := ref.(LocalReference)
					if !ok {
						err = fmt.Errorf("failed to add local dependency '%s': no local chart reference", name)
						return
					}
					if err = dm.addLocalDependency(localRef, c, dep); err != nil {
						err = fmt.Errorf("failed to add local dependency '%s': %w", name, err)
					}
					return
				}
				if err = dm.addRemoteDependency(c, dep); err != nil {
					err = fmt.Errorf("failed to add remote dependency '%s': %w", name, err)
				}
				return
			})
		}
		return nil
	})
	return group.Wait()
}

// addLocalDependency attempts to resolve and add the given local chart.Dependency
// to the chart.
+func (dm *DependencyManager) addLocalDependency(ref LocalReference, c *chartWithLock, dep *helmchart.Dependency) error { + sLocalChartPath, err := dm.secureLocalChartPath(ref, dep) + if err != nil { + return err + } + + if _, err := os.Stat(sLocalChartPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("no chart found at '%s' (reference '%s')", strings.TrimPrefix(sLocalChartPath, ref.WorkDir), dep.Repository) + } + return err + } + + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + err = fmt.Errorf("invalid version/constraint format '%s': %w", dep.Version, err) + return err + } + + ch, err := secureloader.Load(ref.WorkDir, sLocalChartPath) + if err != nil { + return fmt.Errorf("failed to load chart from '%s' (reference '%s'): %w", + strings.TrimPrefix(sLocalChartPath, ref.WorkDir), dep.Repository, err) + } + + ver, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return err + } + + if !constraint.Check(ver) { + err = fmt.Errorf("can't get a valid version for constraint '%s'", dep.Version) + return err + } + + if dep.Alias != "" { + ch.Metadata.Name = dep.Alias + } + + c.mu.Lock() + c.AddDependency(ch) + c.mu.Unlock() + return nil +} + +// addRemoteDependency attempts to resolve and add the given remote chart.Dependency +// to the chart. It locks the chartWithLock before the downloaded dependency is +// added to the chart. 
+func (dm *DependencyManager) addRemoteDependency(chart *chartWithLock, dep *helmchart.Dependency) error { + repo, err := dm.resolveRepository(dep.Repository) + if err != nil { + return err + } + + ver, err := repo.GetChartVersion(dep.Name, dep.Version) + if err != nil { + return fmt.Errorf("failed to get chart '%s' version '%s' from '%s': %w", dep.Name, dep.Version, dep.Repository, err) + } + res, err := repo.DownloadChart(ver) + if err != nil { + return fmt.Errorf("chart download of version '%s' failed: %w", ver.Version, err) + } + ch, err := secureloader.LoadArchive(res) + if err != nil { + return fmt.Errorf("failed to load downloaded archive of version '%s': %w", ver.Version, err) + } + + if dep.Alias != "" { + ch.Metadata.Name = dep.Alias + } + + chart.mu.Lock() + chart.AddDependency(ch) + chart.mu.Unlock() + return nil +} + +// resolveRepository first attempts to resolve the url from the downloaders, falling back +// to getDownloaderCallback if set. It returns the resolved Index, or an error. +func (dm *DependencyManager) resolveRepository(url string) (repo repository.Downloader, err error) { + dm.mu.Lock() + defer dm.mu.Unlock() + + nUrl, err := repository.NormalizeURL(url) + if err != nil { + return + } + err = repository.ValidateDepURL(nUrl) + if err != nil { + return + } + if _, ok := dm.downloaders[nUrl]; !ok { + if dm.getChartDownloaderCallback == nil { + err = fmt.Errorf("no chart repository for URL '%s'", nUrl) + return + } + + if dm.downloaders == nil { + dm.downloaders = map[string]repository.Downloader{} + } + + if dm.downloaders[nUrl], err = dm.getChartDownloaderCallback(nUrl); err != nil { + err = fmt.Errorf("failed to get chart repository for URL '%s': %w", nUrl, err) + return + } + } + return dm.downloaders[nUrl], nil +} + +// secureLocalChartPath returns the secure absolute path of a local dependency. +// It does not allow the dependency's path to be outside the scope of +// LocalReference.WorkDir. 
+func (dm *DependencyManager) secureLocalChartPath(ref LocalReference, dep *helmchart.Dependency) (string, error) { + if dep.Repository == "" { + return securejoin.SecureJoin(ref.WorkDir, filepath.Join(ref.Path, "charts", dep.Name)) + } + localUrl, err := url.Parse(dep.Repository) + if err != nil { + return "", fmt.Errorf("failed to parse alleged local chart reference: %w", err) + } + if localUrl.Scheme != "" && localUrl.Scheme != "file" { + return "", fmt.Errorf("'%s' is not a local chart reference", dep.Repository) + } + return securejoin.SecureJoin(ref.WorkDir, filepath.Join(ref.Path, localUrl.Host, localUrl.Path)) +} + +// collectMissing returns a map with dependencies from reqs that are missing +// from current, indexed by their alias or name. All dependencies of a chart +// are present if len of returned map == 0. +func collectMissing(current []*helmchart.Chart, reqs []*helmchart.Dependency) map[string]*helmchart.Dependency { + // If the number of dependencies equals the number of requested + // dependencies, there are no missing dependencies + if len(current) == len(reqs) { + return nil + } + + // Build up a map of reqs that are not in current, indexed by their + // alias or name + var missing map[string]*helmchart.Dependency + for _, dep := range reqs { + name := dep.Name + if dep.Alias != "" { + name = dep.Alias + } + // Exclude existing dependencies + found := false + for _, existing := range current { + if existing.Name() == name { + found = true + } + } + if found { + continue + } + if missing == nil { + missing = map[string]*helmchart.Dependency{} + } + missing[name] = dep + } + return missing +} + +// isLocalDep returns true if the given chart.Dependency contains a local (file) path reference. 
+func isLocalDep(dep *helmchart.Dependency) bool { + return dep.Repository == "" || strings.HasPrefix(dep.Repository, "file://") +} diff --git a/internal/helm/chart/dependency_manager_test.go b/internal/helm/chart/dependency_manager_test.go new file mode 100644 index 000000000..241959fbe --- /dev/null +++ b/internal/helm/chart/dependency_manager_test.go @@ -0,0 +1,994 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "sync" + "testing" + + . 
"github.com/onsi/gomega" + helmchart "helm.sh/helm/v3/pkg/chart" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" + "github.com/fluxcd/source-controller/internal/helm/repository" +) + +type mockTagsGetter struct { + tags map[string][]string +} + +func (m *mockTagsGetter) Tags(requestURL string) ([]string, error) { + u, err := url.Parse(requestURL) + if err != nil { + return nil, err + } + + name := filepath.Base(u.Path) + if tags, ok := m.tags[name]; ok { + return tags, nil + } + return nil, fmt.Errorf("no tags found for %s with requestURL %s", name, requestURL) +} + +func (m *mockTagsGetter) Login(_ string, _ ...registry.LoginOption) error { + return nil +} + +func (m *mockTagsGetter) Logout(_ string, _ ...registry.LogoutOption) error { + return nil +} + +// mockGetter is a simple mocking getter.Getter implementation, returning +// a byte response to any provided URL. 
+type mockGetter struct { + Response []byte +} + +func (g *mockGetter) Get(_ string, _ ...helmgetter.Option) (*bytes.Buffer, error) { + r := g.Response + return bytes.NewBuffer(r), nil +} + +func TestDependencyManager_Clear(t *testing.T) { + g := NewWithT(t) + + file, err := os.CreateTemp("", "") + g.Expect(err).ToNot(HaveOccurred()) + ociRepoWithCreds, err := repository.NewOCIChartRepository("oci://example.com", repository.WithCredentialsFile(file.Name())) + g.Expect(err).ToNot(HaveOccurred()) + + downloaders := map[string]repository.Downloader{ + "with index": &repository.ChartRepository{ + Index: repo.NewIndexFile(), + RWMutex: &sync.RWMutex{}, + }, + "with credentials": ociRepoWithCreds, + "without credentials": &repository.OCIChartRepository{}, + "nil downloader": nil, + } + + dm := NewDependencyManager(WithRepositories(downloaders)) + g.Expect(dm.Clear()).To(BeNil()) + g.Expect(dm.downloaders).To(HaveLen(len(downloaders))) + for _, v := range downloaders { + switch v := v.(type) { + case *repository.ChartRepository: + g.Expect(v.Index).To(BeNil()) + case *repository.OCIChartRepository: + g.Expect(v.HasCredentials()).To(BeFalse()) + } + } + + if _, err := os.Stat(file.Name()); !errors.Is(err, os.ErrNotExist) { + err = os.Remove(file.Name()) + g.Expect(err).ToNot(HaveOccurred()) + } +} + +func TestDependencyManager_Build(t *testing.T) { + g := NewWithT(t) + + // Mock chart used as grafana chart in the test below. The cached repository + // takes care of the actual grafana related details in the chart index. 
+ chartGrafana, err := os.ReadFile("./../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartGrafana).ToNot(BeEmpty()) + + mockrepos := []repository.Downloader{ + &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + Client: &mockGetter{ + Response: chartGrafana, + }, + RegistryClient: &mockTagsGetter{ + tags: map[string][]string{ + "grafana": {"6.17.4"}, + }, + }, + }, + &repository.ChartRepository{ + Client: &mockGetter{ + Response: chartGrafana, + }, + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + "grafana": { + &repo.ChartVersion{ + Metadata: &helmchart.Metadata{ + Name: "grafana", + Version: "6.17.4", + }, + URLs: []string{"https://example.com/grafana.tgz"}, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + }, + } + + for _, repo := range mockrepos { + build(t, repo) + } +} + +func build(t *testing.T, mockRepo repository.Downloader) { + tests := []struct { + name string + baseDir string + path string + downloaders map[string]repository.Downloader + getChartDownloaderCallback GetChartDownloaderCallback + want int + wantChartFunc func(g *WithT, c *helmchart.Chart) + wantErr string + }{ + { + name: "build failure returns error", + baseDir: "./../testdata/charts", + path: "helmchartwithdeps", + wantErr: "failed to add remote dependency 'grafana': no chart repository for URL", + }, + { + name: "no dependencies returns zero", + baseDir: "./../testdata/charts", + path: "helmchart", + wantChartFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(0)) + }, + want: 0, + }, + { + name: "no dependency returns zero - v1", + baseDir: "./../testdata/charts", + path: "helmchart-v1", + wantChartFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(0)) + }, + want: 0, + }, + { + name: "build with dependencies using lock file", + baseDir: "./../testdata/charts", + path: "helmchartwithdeps", + downloaders: 
map[string]repository.Downloader{ + "https://grafana.github.io/helm-charts/": mockRepo, + }, + getChartDownloaderCallback: func(url string) (repository.Downloader, error) { + return &repository.ChartRepository{URL: "https://grafana.github.io/helm-charts/"}, nil + }, + wantChartFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(2)) + g.Expect(c.Lock.Dependencies).To(HaveLen(3)) + }, + want: 2, + }, + { + name: "build with dependencies - v1", + baseDir: "./../testdata/charts", + path: "helmchartwithdeps-v1", + wantChartFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + want: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + chart, err := secureloader.Load(tt.baseDir, tt.path) + g.Expect(err).ToNot(HaveOccurred()) + + dm := NewDependencyManager( + WithRepositories(tt.downloaders), + WithDownloaderCallback(tt.getChartDownloaderCallback), + ) + absBaseDir, err := filepath.Abs(tt.baseDir) + g.Expect(err).ToNot(HaveOccurred()) + got, err := dm.Build(context.TODO(), LocalReference{WorkDir: absBaseDir, Path: tt.path}, chart) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(got).To(BeZero()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + if tt.wantChartFunc != nil { + tt.wantChartFunc(g, chart) + } + }) + } +} + +func TestDependencyManager_build(t *testing.T) { + tests := []struct { + name string + deps map[string]*helmchart.Dependency + wantErr string + }{ + { + name: "error remote dependency", + deps: map[string]*helmchart.Dependency{ + "example": {Repository: "https://example.com"}, + }, + wantErr: "failed to add remote dependency", + }, + { + name: "error local dependency", + deps: map[string]*helmchart.Dependency{ + "example": {Repository: "file:///invalid"}, + }, + wantErr: "failed to add remote dependency", + }, + } + for 
_, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := NewDependencyManager() + err := dm.build(context.TODO(), LocalReference{}, &helmchart.Chart{}, tt.deps) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestDependencyManager_addLocalDependency(t *testing.T) { + tests := []struct { + name string + chartName string + dep *helmchart.Dependency + wantErr string + wantFunc func(g *WithT, c *helmchart.Chart) + }{ + { + name: "local dependency", + chartName: "helmchartwithdeps", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "file://../helmchart", + }, + wantFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + }, + { + name: "version not matching constraint", + chartName: "helmchartwithdeps", + dep: &helmchart.Dependency{ + Name: chartName, + Version: "0.2.0", + Repository: "file://../helmchart", + }, + wantErr: "can't get a valid version for constraint '0.2.0'", + }, + { + name: "invalid local reference", + chartName: "helmchartwithdeps", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "file://../../../absolutely/invalid", + }, + wantErr: "no chart found at '/absolutely/invalid'", + }, + { + name: "invalid chart archive", + chartName: "helmchartwithdeps", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "file://../empty.tgz", + }, + wantErr: "failed to load chart from '/empty.tgz'", + }, + { + name: "invalid constraint", + chartName: "helmchartwithdeps", + dep: &helmchart.Dependency{ + Name: chartName, + Version: "invalid", + Repository: "file://../helmchart", + }, + wantErr: "invalid version/constraint format 'invalid'", + }, + { + name: "no repository", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + }, + wantFunc: func(g 
*WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + }, + { + name: "no repository invalid reference", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: "nonexistingchart", + Version: chartVersion, + }, + wantErr: "no chart found at '/helmchartwithdepsnorepo/charts/nonexistingchart'", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := NewDependencyManager() + chart := &helmchart.Chart{} + + absWorkDir, err := filepath.Abs("../testdata/charts") + g.Expect(err).ToNot(HaveOccurred()) + + err = dm.addLocalDependency(LocalReference{WorkDir: absWorkDir, Path: tt.chartName}, + &chartWithLock{Chart: chart}, tt.dep) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + if tt.wantFunc != nil { + tt.wantFunc(g, chart) + } + }) + } +} + +func TestDependencyManager_addRemoteDependency(t *testing.T) { + g := NewWithT(t) + + chartB, err := os.ReadFile("../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartB).ToNot(BeEmpty()) + + tests := []struct { + name string + downloaders map[string]repository.Downloader + dep *helmchart.Dependency + wantFunc func(g *WithT, c *helmchart.Chart) + wantErr string + }{ + { + name: "adds remote dependency", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Client: &mockGetter{ + Response: chartB, + }, + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + chartName: { + &repo.ChartVersion{ + Metadata: &helmchart.Metadata{ + Name: chartName, + Version: chartVersion, + }, + URLs: []string{"https://example.com/foo.tgz"}, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Repository: "https://example.com", + }, + wantFunc: func(g *WithT, c *helmchart.Chart) { + 
g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + }, + { + name: "resolve repository error", + downloaders: map[string]repository.Downloader{}, + dep: &helmchart.Dependency{ + Repository: "https://example.com", + }, + wantErr: "no chart repository for URL", + }, + { + name: "resolve aliased repository error", + downloaders: map[string]repository.Downloader{}, + dep: &helmchart.Dependency{ + Repository: "@fantastic-charts", + }, + wantErr: "aliased repository dependency is not supported", + }, + { + name: "strategic load error", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Client: &mockGetter{}, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Repository: "https://example.com", + }, + wantErr: "failed to load index", + }, + { + name: "repository get error", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Index: &repo.IndexFile{}, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Repository: "https://example.com", + }, + wantErr: "no chart name found", + }, + { + name: "repository version constraint error", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + chartName: { + &repo.ChartVersion{ + Metadata: &helmchart.Metadata{ + Name: chartName, + Version: "0.1.0", + }, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Version: "0.2.0", + Repository: "https://example.com", + }, + wantErr: fmt.Sprintf("no '%s' chart with version matching '0.2.0' found", chartName), + }, + { + name: "repository chart download error", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + chartName: { + &repo.ChartVersion{ + Metadata: 
&helmchart.Metadata{ + Name: chartName, + Version: chartVersion, + }, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "https://example.com", + }, + wantErr: "chart download of version '0.1.0' failed", + }, + { + name: "chart load error", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{ + Client: &mockGetter{}, + Index: &repo.IndexFile{ + Entries: map[string]repo.ChartVersions{ + chartName: { + &repo.ChartVersion{ + Metadata: &helmchart.Metadata{ + Name: chartName, + Version: chartVersion, + }, + URLs: []string{"https://example.com/foo.tgz"}, + }, + }, + }, + }, + RWMutex: &sync.RWMutex{}, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "https://example.com", + }, + wantErr: "failed to load downloaded archive of version '0.1.0'", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := &DependencyManager{ + downloaders: tt.downloaders, + } + chart := &helmchart.Chart{} + err := dm.addRemoteDependency(&chartWithLock{Chart: chart}, tt.dep) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + if tt.wantFunc != nil { + tt.wantFunc(g, chart) + } + }) + } +} + +func TestDependencyManager_addRemoteOCIDependency(t *testing.T) { + g := NewWithT(t) + + chartB, err := os.ReadFile("../testdata/charts/helmchart-0.1.0.tgz") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(chartB).ToNot(BeEmpty()) + + tests := []struct { + name string + downloaders map[string]repository.Downloader + dep *helmchart.Dependency + wantFunc func(g *WithT, c *helmchart.Chart) + wantErr string + }{ + { + name: "adds remote oci dependency", + downloaders: map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + 
URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + Client: &mockGetter{ + Response: chartB, + }, + RegistryClient: &mockTagsGetter{ + tags: map[string][]string{ + "helmchart": {"0.1.0"}, + }, + }, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Repository: "oci://example.com", + }, + wantFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + dep := c.Dependencies()[0] + g.Expect(dep).NotTo(BeNil()) + }, + }, + { + name: "remote oci repository fetch tags error", + downloaders: map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + RegistryClient: &mockTagsGetter{ + tags: map[string][]string{}, + }, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Repository: "oci://example.com", + }, + wantErr: fmt.Sprintf("no tags found for %s", chartName), + }, + { + name: "remote oci repository version constraint error", + downloaders: map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + Client: &mockGetter{}, + RegistryClient: &mockTagsGetter{ + tags: map[string][]string{ + "helmchart": {"0.1.0"}, + }, + }, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Version: "0.2.0", + Repository: "oci://example.com", + }, + wantErr: "failed to load downloaded archive of version '0.2.0'", + }, + { + name: "chart load error", + downloaders: map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + Client: &mockGetter{}, + RegistryClient: &mockTagsGetter{ + tags: map[string][]string{ + "helmchart": {"0.1.0"}, + }, + }, + }, + }, + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + Repository: "oci://example.com", + }, + wantErr: "failed to load downloaded archive of version '0.1.0'", + }, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := &DependencyManager{ + downloaders: tt.downloaders, + } + chart := &helmchart.Chart{} + err := dm.addRemoteDependency(&chartWithLock{Chart: chart}, tt.dep) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + if tt.wantFunc != nil { + tt.wantFunc(g, chart) + } + }) + } +} + +func TestDependencyManager_resolveRepository(t *testing.T) { + tests := []struct { + name string + downloaders map[string]repository.Downloader + getChartDownloaderCallback GetChartDownloaderCallback + url string + want repository.Downloader + wantDownloaders map[string]repository.Downloader + wantErr string + }{ + { + name: "resolves from downloaders index", + url: "https://example.com", + downloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{URL: "https://example.com"}, + }, + want: &repository.ChartRepository{URL: "https://example.com"}, + }, + { + name: "resolves from callback", + url: "https://example.com", + getChartDownloaderCallback: func(_ string) (repository.Downloader, error) { + return &repository.ChartRepository{URL: "https://example.com"}, nil + }, + want: &repository.ChartRepository{URL: "https://example.com"}, + wantDownloaders: map[string]repository.Downloader{ + "https://example.com/": &repository.ChartRepository{URL: "https://example.com"}, + }, + }, + { + name: "error from callback", + url: "https://example.com", + getChartDownloaderCallback: func(_ string) (repository.Downloader, error) { + return nil, errors.New("a very unique error") + }, + wantErr: "a very unique error", + wantDownloaders: map[string]repository.Downloader{}, + }, + { + name: "error on not found", + url: "https://example.com", + wantErr: "no chart repository for URL", + }, + { + name: "resolves from oci repository", + url: "oci://example.com", + downloaders: 
map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + }, + }, + want: &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + }, + }, + { + name: "resolves oci repository from callback", + url: "oci://example.com", + getChartDownloaderCallback: func(_ string) (repository.Downloader, error) { + return &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com"}, + }, nil + }, + want: &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + }, + + wantDownloaders: map[string]repository.Downloader{ + "oci://example.com": &repository.OCIChartRepository{ + URL: url.URL{ + Scheme: "oci", + Host: "example.com", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := &DependencyManager{ + downloaders: tt.downloaders, + getChartDownloaderCallback: tt.getChartDownloaderCallback, + } + + got, err := dm.resolveRepository(tt.url) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(got).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + if tt.wantDownloaders != nil { + g.Expect(dm.downloaders).To(Equal(tt.wantDownloaders)) + } + }) + } +} + +func TestDependencyManager_secureLocalChartPath(t *testing.T) { + tests := []struct { + name string + baseDir string + path string + dep *helmchart.Dependency + want string + wantErr string + }{ + { + name: "secure local file path", + baseDir: "/tmp/workdir", + path: "/chart", + dep: &helmchart.Dependency{ + Repository: "../dep", + }, + want: "/tmp/workdir/dep", + }, + { + name: "insecure local file path", + baseDir: "/tmp/workdir", + path: "/", + dep: &helmchart.Dependency{ + Repository: "/../../dep", + }, + want: "/tmp/workdir/dep", + }, + { + name: "URL 
parse error", + dep: &helmchart.Dependency{ + Repository: ": //example.com", + }, + wantErr: "missing protocol scheme", + }, + { + name: "error on URL scheme other than file", + dep: &helmchart.Dependency{ + Repository: "https://example.com", + }, + wantErr: "not a local chart reference", + }, + { + name: "local dependency with empty repository", + dep: &helmchart.Dependency{ + Name: "some-subchart", + }, + baseDir: "/tmp/workdir", + path: "/chart", + want: "/tmp/workdir/chart/charts/some-subchart", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + dm := NewDependencyManager() + got, err := dm.secureLocalChartPath(LocalReference{WorkDir: tt.baseDir, Path: tt.path}, tt.dep) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeEmpty()) + g.Expect(got).To(Equal(tt.want)) + }) + } +} + +func Test_collectMissing(t *testing.T) { + tests := []struct { + name string + current []*helmchart.Chart + reqs []*helmchart.Dependency + want map[string]*helmchart.Dependency + }{ + { + name: "one missing", + current: []*helmchart.Chart{}, + reqs: []*helmchart.Dependency{ + {Name: chartName}, + }, + want: map[string]*helmchart.Dependency{ + chartName: {Name: chartName}, + }, + }, + { + name: "alias missing", + current: []*helmchart.Chart{ + { + Metadata: &helmchart.Metadata{ + Name: chartName, + }, + }, + }, + reqs: []*helmchart.Dependency{ + {Name: chartName}, + {Name: chartName, Alias: chartName + "-alias"}, + }, + want: map[string]*helmchart.Dependency{ + chartName + "-alias": {Name: chartName, Alias: chartName + "-alias"}, + }, + }, + { + name: "all current", + current: []*helmchart.Chart{ + { + Metadata: &helmchart.Metadata{ + Name: chartName, + }, + }, + }, + reqs: []*helmchart.Dependency{ + {Name: chartName}, + }, + want: nil, + }, + { + name: "nil", + current: nil, + reqs: nil, + want: nil, + 
}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got := collectMissing(tt.current, tt.reqs) + g.Expect(got).To(Equal(tt.want)) + }) + } +} + +func Test_isLocalDep(t *testing.T) { + tests := []struct { + name string + dep *helmchart.Dependency + want bool + }{ + { + name: "file protocol", + dep: &helmchart.Dependency{Repository: "file:///some/path"}, + want: true, + }, + { + name: "empty", + dep: &helmchart.Dependency{Repository: ""}, + want: true, + }, + { + name: "https url", + dep: &helmchart.Dependency{Repository: "https://example.com"}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(isLocalDep(tt.dep)).To(Equal(tt.want)) + }) + } +} diff --git a/internal/helm/chart/errors.go b/internal/helm/chart/errors.go new file mode 100644 index 000000000..7b1b7f3b0 --- /dev/null +++ b/internal/helm/chart/errors.go @@ -0,0 +1,89 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "errors" + "fmt" +) + +// BuildErrorReason is the descriptive reason for a BuildError. +type BuildErrorReason struct { + // Reason is the programmatic build error reason in CamelCase. + Reason string + + // Summary is the human build error reason, used to provide + // the Error string, and further context to the BuildError. 
+ Summary string +} + +// Error returns the string representation of BuildErrorReason. +func (e BuildErrorReason) Error() string { + return e.Summary +} + +// BuildError contains a wrapped Err and a Reason indicating why it occurred. +type BuildError struct { + Reason BuildErrorReason + Err error +} + +// Error returns Err as a string, prefixed with the Reason to provide context. +func (e *BuildError) Error() string { + if e.Reason.Error() == "" { + return e.Err.Error() + } + return fmt.Sprintf("%s: %s", e.Reason.Error(), e.Err.Error()) +} + +// Is returns true if the Reason or Err equals target. +// It can be used to programmatically place an arbitrary Err in the +// context of the Builder: +// +// err := &BuildError{Reason: ErrChartPull, Err: errors.New("arbitrary transport error")} +// errors.Is(err, ErrChartPull) +func (e *BuildError) Is(target error) bool { + if e.Reason == target { + return true + } + return errors.Is(e.Err, target) +} + +// Unwrap returns the underlying Err. +func (e *BuildError) Unwrap() error { + return e.Err +} + +func IsPersistentBuildErrorReason(err error) bool { + switch err { + case ErrChartReference, ErrChartMetadataPatch, ErrValuesFilesMerge: + return true + default: + return false + } +} + +var ( + ErrChartReference = BuildErrorReason{Reason: "InvalidChartReference", Summary: "invalid chart reference"} + ErrChartPull = BuildErrorReason{Reason: "ChartPullError", Summary: "chart pull error"} + ErrChartMetadataPatch = BuildErrorReason{Reason: "MetadataPatchError", Summary: "chart metadata patch error"} + ErrValuesFilesMerge = BuildErrorReason{Reason: "ValuesFilesError", Summary: "values files merge error"} + ErrDependencyBuild = BuildErrorReason{Reason: "DependencyBuildError", Summary: "dependency build error"} + ErrChartPackage = BuildErrorReason{Reason: "ChartPackageError", Summary: "chart package error"} + ErrChartVerification = BuildErrorReason{Reason: "ChartVerificationError", Summary: "chart verification error"} + ErrUnknown = 
BuildErrorReason{Reason: "Unknown", Summary: "unknown build error"} +) diff --git a/internal/helm/chart/errors_test.go b/internal/helm/chart/errors_test.go new file mode 100644 index 000000000..13428e6cd --- /dev/null +++ b/internal/helm/chart/errors_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" +) + +func TestBuildErrorReason_Error(t *testing.T) { + g := NewWithT(t) + + err := BuildErrorReason{"Reason", "reason"} + g.Expect(err.Error()).To(Equal("reason")) +} + +func TestBuildError_Error(t *testing.T) { + tests := []struct { + name string + err *BuildError + want string + }{ + { + name: "with reason", + err: &BuildError{ + Reason: BuildErrorReason{"Reason", "reason"}, + Err: errors.New("error"), + }, + want: "reason: error", + }, + { + name: "without reason", + err: &BuildError{ + Err: errors.New("error"), + }, + want: "error", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(tt.err.Error()).To(Equal(tt.want)) + }) + } +} + +func TestBuildError_Is(t *testing.T) { + g := NewWithT(t) + + wrappedErr := errors.New("wrapped") + err := &BuildError{ + Reason: ErrChartPackage, + Err: wrappedErr, + } + + g.Expect(err.Is(ErrChartPackage)).To(BeTrue()) + g.Expect(err.Is(wrappedErr)).To(BeTrue()) + g.Expect(err.Is(ErrDependencyBuild)).To(BeFalse()) +} + +func 
TestBuildError_Unwrap(t *testing.T) { + g := NewWithT(t) + + wrap := errors.New("wrapped") + err := BuildError{Err: wrap} + g.Expect(err.Unwrap()).To(Equal(wrap)) +} diff --git a/internal/helm/chart/metadata.go b/internal/helm/chart/metadata.go new file mode 100644 index 000000000..e3c91ac6b --- /dev/null +++ b/internal/helm/chart/metadata.go @@ -0,0 +1,254 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "strings" + + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/source-controller/internal/helm" +) + +var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) + +// OverwriteChartDefaultValues overwrites the chart default values file with the given data. 
+func OverwriteChartDefaultValues(chart *helmchart.Chart, vals chartutil.Values) (bool, error) { + if vals == nil { + return false, nil + } + + var bVals bytes.Buffer + if len(vals) > 0 { + if err := vals.Encode(&bVals); err != nil { + return false, err + } + } + + // Replace current values file in Raw field + for _, f := range chart.Raw { + if f.Name == chartutil.ValuesfileName { + // Do nothing if contents are equal + if reflect.DeepEqual(f.Data, bVals.Bytes()) { + return false, nil + } + + // Replace in Files field + for _, f := range chart.Files { + if f.Name == chartutil.ValuesfileName { + f.Data = bVals.Bytes() + } + } + + f.Data = bVals.Bytes() + chart.Values = vals.AsMap() + return true, nil + } + } + + // This should never happen, helm charts must have a values.yaml file to be valid + return false, fmt.Errorf("failed to locate values file: %s", chartutil.ValuesfileName) +} + +// LoadChartMetadata attempts to load the chart.Metadata from the "Chart.yaml" file in the directory or archive at the +// given chartPath. It takes "requirements.yaml" files into account, and is therefore compatible with the +// chart.APIVersionV1 format. +func LoadChartMetadata(chartPath string) (meta *helmchart.Metadata, err error) { + i, err := os.Stat(chartPath) + if err != nil { + return nil, err + } + if i.IsDir() { + meta, err = LoadChartMetadataFromDir(chartPath) + return + } + meta, err = LoadChartMetadataFromArchive(chartPath) + return +} + +// LoadChartMetadataFromDir loads the chart.Metadata from the "Chart.yaml" file in the directory at the given path. +// It takes "requirements.yaml" files into account, and is therefore compatible with the chart.APIVersionV1 format. 
+func LoadChartMetadataFromDir(dir string) (*helmchart.Metadata, error) { + m := new(helmchart.Metadata) + + b, err := os.ReadFile(filepath.Join(dir, chartutil.ChartfileName)) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(b, m) + if err != nil { + return nil, fmt.Errorf("cannot load '%s': %w", chartutil.ChartfileName, err) + } + if m.APIVersion == "" { + m.APIVersion = helmchart.APIVersionV1 + } + + fp := filepath.Join(dir, "requirements.yaml") + stat, err := os.Stat(fp) + if (err != nil && !errors.Is(err, os.ErrNotExist)) || stat != nil { + if err != nil { + return nil, err + } + if stat.IsDir() { + return nil, fmt.Errorf("'%s' is a directory", stat.Name()) + } + if stat.Size() > helm.MaxChartFileSize { + return nil, fmt.Errorf("size of '%s' exceeds '%d' bytes limit", stat.Name(), helm.MaxChartFileSize) + } + } + + b, err = os.ReadFile(fp) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + if len(b) > 0 { + if err = yaml.Unmarshal(b, m); err != nil { + return nil, fmt.Errorf("cannot load 'requirements.yaml': %w", err) + } + } + return m, nil +} + +// LoadChartMetadataFromArchive loads the chart.Metadata from the "Chart.yaml" file in the archive at the given path. +// It takes "requirements.yaml" files into account, and is therefore compatible with the chart.APIVersionV1 format. 
+func LoadChartMetadataFromArchive(archive string) (*helmchart.Metadata, error) { + stat, err := os.Stat(archive) + if err != nil || stat.IsDir() { + if err == nil { + err = fmt.Errorf("'%s' is a directory", stat.Name()) + } + return nil, err + } + if stat.Size() > helm.MaxChartSize { + return nil, fmt.Errorf("size of chart '%s' exceeds '%d' bytes limit", stat.Name(), helm.MaxChartSize) + } + + f, err := os.Open(archive) + if err != nil { + return nil, err + } + defer f.Close() + + r := bufio.NewReader(f) + zr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + tr := tar.NewReader(zr) + + // The following logic is on par with how Helm validates the package while + // unpackaging it, except that we only read the Metadata related files. + // Ref: https://github.com/helm/helm/blob/a499b4b179307c267bdf3ec49b880e3dbd2a5591/pkg/chart/loader/archive.go#L104 + var m *helmchart.Metadata + for { + hd, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + + if hd.FileInfo().IsDir() { + // Use this instead of hd.Typeflag because we don't have to do any + // inference chasing. + continue + } + + switch hd.Typeflag { + // We don't want to process these extension header files. + case tar.TypeXGlobalHeader, tar.TypeXHeader: + continue + } + + // Archive could contain \ if generated on Windows + delimiter := "/" + if strings.ContainsRune(hd.Name, '\\') { + delimiter = "\\" + } + parts := strings.Split(hd.Name, delimiter) + n := strings.Join(parts[1:], delimiter) + + // Normalize the path to the / delimiter + n = strings.ReplaceAll(n, delimiter, "/") + + if path.IsAbs(n) { + return nil, errors.New("chart illegally contains absolute paths") + } + + n = path.Clean(n) + if n == "." { + // In this case, the original path was relative when it should have been absolute. 
+ return nil, fmt.Errorf("chart illegally contains content outside the base directory: %s", hd.Name) + } + if strings.HasPrefix(n, "..") { + return nil, fmt.Errorf("chart illegally references parent directory") + } + + // In some particularly arcane acts of path creativity, it is possible to intermix + // UNIX and Windows style paths in such a way that you produce a result of the form + // c:/foo even after all the built-in absolute path checks. So we explicitly check + // for this condition. + if drivePathPattern.MatchString(n) { + return nil, errors.New("chart contains illegally named files") + } + + // We are only interested in files in the base directory from here on + if len(parts) != 2 { + continue + } + + switch parts[1] { + case chartutil.ChartfileName, "requirements.yaml": + if hd.Size > helm.MaxChartFileSize { + return nil, fmt.Errorf("size of '%s' exceeds '%d' bytes limit", hd.Name, helm.MaxChartFileSize) + } + b, err := io.ReadAll(tr) + if err != nil { + return nil, err + } + if m == nil { + m = new(helmchart.Metadata) + } + err = yaml.Unmarshal(b, m) + if err != nil { + return nil, fmt.Errorf("cannot load '%s': %w", parts[1], err) + } + if m.APIVersion == "" { + m.APIVersion = helmchart.APIVersionV1 + } + } + } + if m == nil { + return nil, fmt.Errorf("no '%s' found", chartutil.ChartfileName) + } + return m, nil +} diff --git a/internal/helm/chart/metadata_test.go b/internal/helm/chart/metadata_test.go new file mode 100644 index 000000000..1c002a1df --- /dev/null +++ b/internal/helm/chart/metadata_test.go @@ -0,0 +1,263 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "os" + "path/filepath" + "testing" + + . "github.com/onsi/gomega" + "github.com/otiai10/copy" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + + "github.com/fluxcd/source-controller/internal/helm" +) + +var ( + // helmPackageFile contains the path to a Helm package in the v2 format + // without any dependencies + helmPackageFile = "../testdata/charts/helmchart-0.1.0.tgz" + chartName = "helmchart" + chartVersion = "0.1.0" + + // helmPackageV1File contains the path to a Helm package in the v1 format, + // including dependencies in a requirements.yaml file which should be + // loaded + helmPackageV1File = "../testdata/charts/helmchartwithdeps-v1-0.3.0.tgz" + chartNameV1 = "helmchartwithdeps-v1" + chartVersionV1 = "0.3.0" + + originalValuesFixture = []byte(`override: original +`) + chartFilesFixture = []*helmchart.File{ + { + Name: "values.yaml", + Data: originalValuesFixture, + }, + } + chartFixture = helmchart.Chart{ + Metadata: &helmchart.Metadata{ + Name: "test", + Version: "0.1.0", + }, + Raw: chartFilesFixture, + Files: chartFilesFixture, + } +) + +func TestOverwriteChartDefaultValues(t *testing.T) { + invalidChartFixture := chartFixture + invalidChartFixture.Raw = []*helmchart.File{} + invalidChartFixture.Files = []*helmchart.File{} + + testCases := []struct { + desc string + chart helmchart.Chart + data []byte + ok bool + expectErr bool + }{ + { + desc: "invalid chart", + chart: invalidChartFixture, + data: originalValuesFixture, + expectErr: true, + }, + { + desc: 
"identical override", + chart: chartFixture, + data: originalValuesFixture, + }, + { + desc: "valid override", + chart: chartFixture, + ok: true, + data: []byte(`override: test +`), + }, + { + desc: "empty override", + chart: chartFixture, + ok: true, + data: []byte(``), + }, + } + for _, tt := range testCases { + t.Run(tt.desc, func(t *testing.T) { + g := NewWithT(t) + + fixture := tt.chart + vals, err := chartutil.ReadValues(tt.data) + g.Expect(err).ToNot(HaveOccurred()) + ok, err := OverwriteChartDefaultValues(&fixture, vals) + g.Expect(ok).To(Equal(tt.ok)) + + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(ok).To(Equal(tt.ok)) + return + } + + if tt.ok { + for _, f := range fixture.Raw { + if f.Name == chartutil.ValuesfileName { + g.Expect(f.Data).To(Equal(tt.data)) + } + } + for _, f := range fixture.Files { + if f.Name == chartutil.ValuesfileName { + g.Expect(f.Data).To(Equal(tt.data)) + } + } + } + }) + } +} + +func TestLoadChartMetadataFromDir(t *testing.T) { + g := NewWithT(t) + + // Create a chart file that exceeds the max chart file size. 
+ tmpDir := t.TempDir() + g.Expect(copy.Copy("../testdata/charts/helmchart", tmpDir)).To(Succeed()) + bigRequirementsFile := filepath.Join(tmpDir, "requirements.yaml") + data := make([]byte, helm.MaxChartFileSize+10) + g.Expect(os.WriteFile(bigRequirementsFile, data, 0o640)).ToNot(HaveOccurred()) + + tests := []struct { + name string + dir string + wantName string + wantVersion string + wantDependencyCount int + wantErr string + }{ + { + name: "Loads from dir", + dir: "../testdata/charts/helmchart", + wantName: "helmchart", + wantVersion: "0.1.0", + }, + { + name: "Loads from v1 dir including requirements.yaml", + dir: "../testdata/charts/helmchartwithdeps-v1", + wantName: chartNameV1, + wantVersion: chartVersionV1, + wantDependencyCount: 1, + }, + { + name: "Error if no Chart.yaml", + dir: "../testdata/charts/", + wantErr: "../testdata/charts/Chart.yaml: no such file or directory", + }, + { + name: "Error if file size exceeds max size", + dir: tmpDir, + wantErr: "size of 'requirements.yaml' exceeds", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := LoadChartMetadataFromDir(tt.dir) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(got).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Validate()).To(Succeed()) + g.Expect(got.Name).To(Equal(tt.wantName)) + g.Expect(got.Version).To(Equal(tt.wantVersion)) + g.Expect(got.Dependencies).To(HaveLen(tt.wantDependencyCount)) + }) + } +} + +func TestLoadChartMetadataFromArchive(t *testing.T) { + g := NewWithT(t) + + // Create a chart archive that exceeds the max chart size. 
+ tmpDir := t.TempDir() + bigArchiveFile := filepath.Join(tmpDir, "chart.tgz") + data := make([]byte, helm.MaxChartSize+10) + g.Expect(os.WriteFile(bigArchiveFile, data, 0o640)).ToNot(HaveOccurred()) + + tests := []struct { + name string + archive string + wantName string + wantVersion string + wantDependencyCount int + wantErr string + }{ + { + name: "Loads from archive", + archive: helmPackageFile, + wantName: chartName, + wantVersion: chartVersion, + }, + { + name: "Loads from v1 archive including requirements.yaml", + archive: helmPackageV1File, + wantName: chartNameV1, + wantVersion: chartVersionV1, + wantDependencyCount: 1, + }, + { + name: "Error on not found", + archive: "../testdata/invalid.tgz", + wantErr: "no such file or directory", + }, + { + name: "Error if no Chart.yaml", + archive: "../testdata/charts/empty.tgz", + wantErr: "no 'Chart.yaml' found", + }, + { + name: "Error if archive size exceeds max size", + archive: bigArchiveFile, + wantErr: "size of chart 'chart.tgz' exceeds", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := LoadChartMetadataFromArchive(tt.archive) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(got).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Validate()).To(Succeed()) + g.Expect(got.Name).To(Equal(tt.wantName)) + g.Expect(got.Version).To(Equal(tt.wantVersion)) + g.Expect(got.Dependencies).To(HaveLen(tt.wantDependencyCount)) + }) + } +} diff --git a/internal/helm/chart/secureloader/directory.go b/internal/helm/chart/secureloader/directory.go new file mode 100644 index 000000000..90285758b --- /dev/null +++ b/internal/helm/chart/secureloader/directory.go @@ -0,0 +1,253 @@ +/* +Copyright The Helm Authors. 
+Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file has been derived from +https://github.com/helm/helm/blob/v3.8.1/pkg/chart/loader/directory.go. + +It has been modified to not blindly accept any resolved symlink path, but +instead check it against the configured root before allowing it to be included. +It also allows for capping the size of any file loaded into the chart. +*/ + +package secureloader + +import ( + "bytes" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + securejoin "github.com/cyphar/filepath-securejoin" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + + "github.com/fluxcd/source-controller/internal/helm" + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader/ignore" + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader/sympath" +) + +var ( + utf8bom = []byte{0xEF, 0xBB, 0xBF} +) + +// SecureDirLoader securely loads a chart from a directory while resolving +// symlinks without including files outside root. +type SecureDirLoader struct { + root string + path string + maxSize int64 +} + +// NewSecureDirLoader returns a new SecureDirLoader, configured to the scope of the +// root and provided dir. Max size configures the maximum size a file must not +// exceed to be loaded. If 0 it defaults to helm.MaxChartFileSize, it can be +// disabled using a negative integer. 
+func NewSecureDirLoader(root string, path string, maxSize int64) SecureDirLoader { + if maxSize == 0 { + maxSize = helm.MaxChartFileSize + } + return SecureDirLoader{ + root: root, + path: path, + maxSize: maxSize, + } +} + +// Load loads and returns the chart.Chart, or an error. +func (l SecureDirLoader) Load() (*chart.Chart, error) { + return SecureLoadDir(l.root, l.path, l.maxSize) +} + +// SecureLoadDir securely loads a chart from the path relative to root, without +// traversing outside root. When maxSize >= 0, files are not allowed to exceed +// this size, or an error is returned. +func SecureLoadDir(root, path string, maxSize int64) (*chart.Chart, error) { + root, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + // Ensure path is relative + if filepath.IsAbs(path) { + relChartPath, err := filepath.Rel(root, path) + if err != nil { + return nil, err + } + path = relChartPath + } + + // Resolve secure absolute path + absChartName, err := securejoin.SecureJoin(root, path) + if err != nil { + return nil, err + } + + // Load ignore rules + rules, err := secureLoadIgnoreRules(root, path) + if err != nil { + return nil, fmt.Errorf("cannot load ignore rules for chart: %w", err) + } + + // Lets go for a walk... + fileWalker := newSecureFileWalker(root, absChartName, maxSize, rules) + if err = sympath.Walk(fileWalker.absChartPath, fileWalker.walk); err != nil { + return nil, fmt.Errorf("failed to load files from %s: %w", strings.TrimPrefix(fileWalker.absChartPath, fileWalker.root), err) + } + + loaded, err := loader.LoadFiles(fileWalker.files) + if err != nil { + return nil, fmt.Errorf("failed to load chart from %s: %w", strings.TrimPrefix(fileWalker.absChartPath, fileWalker.root), err) + } + return loaded, nil +} + +// secureLoadIgnoreRules attempts to load the ignore.HelmIgnore file from the +// chart path relative to root. If the file is a symbolic link, it is evaluated +// with the given root treated as root of the filesystem. 
+// If the ignore file does not exist, or points to a location outside of root, +// default ignore.Rules are returned. Any error other than fs.ErrNotExist is +// returned. +func secureLoadIgnoreRules(root, chartPath string) (*ignore.Rules, error) { + rules := ignore.Empty() + + iFile, err := securejoin.SecureJoin(root, filepath.Join(chartPath, ignore.HelmIgnore)) + if err != nil { + return nil, err + } + _, err = os.Stat(iFile) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + if err == nil { + if rules, err = ignore.ParseFile(iFile); err != nil { + return nil, err + } + } + + rules.AddDefaults() + return rules, nil +} + +// secureFileWalker does the actual walking over the directory, any file loaded +// by walk is appended to files. +type secureFileWalker struct { + root string + absChartPath string + maxSize int64 + rules *ignore.Rules + files []*loader.BufferedFile +} + +func newSecureFileWalker(root, absChartPath string, maxSize int64, rules *ignore.Rules) *secureFileWalker { + absChartPath = filepath.Clean(absChartPath) + string(filepath.Separator) + return &secureFileWalker{ + root: root, + absChartPath: absChartPath, + maxSize: maxSize, + rules: rules, + files: make([]*loader.BufferedFile, 0), + } +} + +func (w *secureFileWalker) walk(name, absName string, fi os.FileInfo, err error) error { + n := strings.TrimPrefix(name, w.absChartPath) + if n == "" { + // No need to process top level. Avoid bug with helmignore .* matching + // empty names. See issue 1779. + return nil + } + + if err != nil { + return err + } + + // Normalize to / since it will also work on Windows + n = filepath.ToSlash(n) + + if fi.IsDir() { + // Directory-based ignore rules should involve skipping the entire + // contents of that directory. + if w.rules.Ignore(n, fi) { + return filepath.SkipDir + } + // Check after excluding ignores to provide the user with an option + // to opt-out from including certain paths. 
+ if _, err := isSecureAbsolutePath(w.root, absName); err != nil { + return fmt.Errorf("cannot load '%s' directory: %w", n, err) + } + return nil + } + + // If a .helmignore file matches, skip this file. + if w.rules.Ignore(n, fi) { + return nil + } + + // Check after excluding ignores to provide the user with an option + // to opt-out from including certain paths. + if _, err := isSecureAbsolutePath(w.root, absName); err != nil { + return fmt.Errorf("cannot load '%s' file: %w", n, err) + } + + // Irregular files include devices, sockets, and other uses of files that + // are not regular files. In Go they have a file mode type bit set. + // See https://golang.org/pkg/os/#FileMode for examples. + if !fi.Mode().IsRegular() { + return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", n) + } + + // Confirm size it not outside boundaries + if fileSize := fi.Size(); w.maxSize > 0 && fileSize > w.maxSize { + return fmt.Errorf("cannot load file %s as file size (%d) exceeds limit (%d)", n, fileSize, w.maxSize) + } + + data, err := os.ReadFile(absName) + if err != nil { + if pathErr := new(fs.PathError); errors.As(err, &pathErr) { + err = &fs.PathError{Op: pathErr.Op, Path: strings.TrimPrefix(absName, w.root), Err: pathErr.Err} + } + return fmt.Errorf("error reading %s: %w", n, err) + } + data = bytes.TrimPrefix(data, utf8bom) + + w.files = append(w.files, &loader.BufferedFile{Name: n, Data: data}) + return nil +} + +// isSecureAbsolutePath attempts to make the given absolute path relative to +// root and securely joins this with root. If the result equals absolute path, +// it is safe to use. 
+func isSecureAbsolutePath(root, absPath string) (bool, error) { + root, absPath = filepath.Clean(root), filepath.Clean(absPath) + if root == "/" { + return true, nil + } + unsafePath, err := filepath.Rel(root, absPath) + if err != nil { + return false, fmt.Errorf("cannot calculate path relative to root for absolute path") + } + safePath, err := securejoin.SecureJoin(root, unsafePath) + if err != nil { + return false, fmt.Errorf("cannot securely join root with resolved relative path") + } + if safePath != absPath { + return false, fmt.Errorf("absolute path traverses outside root boundary: relative path to root %s", unsafePath) + } + return true, nil +} diff --git a/internal/helm/chart/secureloader/directory_test.go b/internal/helm/chart/secureloader/directory_test.go new file mode 100644 index 000000000..5dacfc7d8 --- /dev/null +++ b/internal/helm/chart/secureloader/directory_test.go @@ -0,0 +1,421 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secureloader + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + "testing/fstest" + + . 
"github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/chart" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/source-controller/internal/helm" + "github.com/fluxcd/source-controller/internal/helm/chart/secureloader/ignore" +) + +func TestSecureDirLoader_Load(t *testing.T) { + metadata := chart.Metadata{ + Name: "test", + APIVersion: "v2", + Version: "1.0", + Type: "application", + } + + t.Run("chart", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + m := metadata + b, err := yaml.Marshal(&m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "Chart.yaml"), b, 0o640)).To(Succeed()) + + got, err := (NewSecureDirLoader(tmpDir, "", helm.MaxChartFileSize)).Load() + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Name()).To(Equal(m.Name)) + }) + + t.Run("chart with absolute path", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + m := metadata + b, err := yaml.Marshal(&m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "Chart.yaml"), b, 0o640)).To(Succeed()) + + got, err := (NewSecureDirLoader(tmpDir, tmpDir, helm.MaxChartFileSize)).Load() + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Name()).To(Equal(m.Name)) + }) + + t.Run("chart with illegal path", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + + m := metadata + b, err := yaml.Marshal(&m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "Chart.yaml"), b, 0o640)).To(Succeed()) + + root := filepath.Join(tmpDir, "root") + g.Expect(os.Mkdir(root, 0o700)).To(Succeed()) + + got, err := (NewSecureDirLoader(root, "../", helm.MaxChartFileSize)).Load() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("failed to load chart from /: Chart.yaml file is missing")) + g.Expect(got).To(BeNil()) + + got, err = (NewSecureDirLoader(root, tmpDir, 
helm.MaxChartFileSize)).Load() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("failed to load chart from /: Chart.yaml file is missing")) + g.Expect(got).To(BeNil()) + }) + + t.Run("chart with .helmignore", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + m := metadata + b, err := yaml.Marshal(&m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "Chart.yaml"), b, 0o640)).To(Succeed()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, ignore.HelmIgnore), []byte("file.txt"), 0o640)).To(Succeed()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "file.txt"), []byte("not included"), 0o640)).To(Succeed()) + + got, err := (NewSecureDirLoader(tmpDir, "", helm.MaxChartFileSize)).Load() + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Name()).To(Equal(m.Name)) + g.Expect(got.Raw).To(HaveLen(2)) + }) +} + +func Test_secureLoadIgnoreRules(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + g := NewWithT(t) + + r, err := secureLoadIgnoreRules("/workdir", "") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Ignore("file.txt", nil)).To(BeFalse()) + g.Expect(r.Ignore("templates/.dotfile", nil)).To(BeTrue()) + }) + + t.Run("with "+ignore.HelmIgnore, func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + g.Expect(os.WriteFile(filepath.Join(tmpDir, ignore.HelmIgnore), []byte("file.txt"), 0o640)).To(Succeed()) + + r, err := secureLoadIgnoreRules(tmpDir, "") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Ignore("file.txt", nil)).To(BeTrue()) + g.Expect(r.Ignore("templates/.dotfile", nil)).To(BeTrue()) + g.Expect(r.Ignore("other.txt", nil)).To(BeFalse()) + }) + + t.Run("with chart path and "+ignore.HelmIgnore, func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + chartPath := "./sub/chart" + g.Expect(os.MkdirAll(filepath.Join(tmpDir, chartPath), 0o700)).To(Succeed()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, chartPath, 
ignore.HelmIgnore), []byte("file.txt"), 0o640)).To(Succeed()) + + r, err := secureLoadIgnoreRules(tmpDir, chartPath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Ignore("file.txt", nil)).To(BeTrue()) + }) + + t.Run("with relative "+ignore.HelmIgnore+" symlink", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + chartPath := "sub/chart" + g.Expect(os.MkdirAll(filepath.Join(tmpDir, chartPath), 0o700)).To(Succeed()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "symlink"), []byte("file.txt"), 0o640)).To(Succeed()) + g.Expect(os.Symlink("../../symlink", filepath.Join(tmpDir, chartPath, ignore.HelmIgnore))) + + r, err := secureLoadIgnoreRules(tmpDir, chartPath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Ignore("file.txt", nil)).To(BeTrue()) + }) + + t.Run("with illegal "+ignore.HelmIgnore+" symlink", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + chartPath := "/sub/chart" + g.Expect(os.MkdirAll(filepath.Join(tmpDir, chartPath), 0o700)).To(Succeed()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "symlink"), []byte("file.txt"), 0o640)).To(Succeed()) + g.Expect(os.Symlink("../../symlink", filepath.Join(tmpDir, chartPath, ignore.HelmIgnore))) + + r, err := secureLoadIgnoreRules(filepath.Join(tmpDir, chartPath), "") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Ignore("templates/.dotfile", nil)).To(BeTrue()) + g.Expect(r.Ignore("file.txt", nil)).To(BeFalse()) + }) + + t.Run("with "+ignore.HelmIgnore+" parsing error", func(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + g.Expect(os.WriteFile(filepath.Join(tmpDir, ignore.HelmIgnore), []byte("**"), 0o640)).To(Succeed()) + + _, err := secureLoadIgnoreRules(tmpDir, "") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("syntax is not supported")) + }) +} + +func Test_secureFileWalker_walk(t *testing.T) { + g := NewWithT(t) + + const ( + root = "/fake/root" + chartPath = "/fake/root/dir" + ) + + fakeDirName := "fake-dir" + 
fakeFileName := "fake-file" + fakeDeviceFileName := "fake-device" + fakeFS := fstest.MapFS{ + fakeDirName: &fstest.MapFile{Mode: fs.ModeDir}, + fakeFileName: &fstest.MapFile{Data: []byte("a couple bytes")}, + fakeDeviceFileName: &fstest.MapFile{Mode: fs.ModeDevice}, + } + + // Safe to further re-use this for other paths + fakeDirInfo, err := fakeFS.Stat(fakeDirName) + g.Expect(err).ToNot(HaveOccurred()) + fakeFileInfo, err := fakeFS.Stat(fakeFileName) + g.Expect(err).ToNot(HaveOccurred()) + fakeDeviceInfo, err := fakeFS.Stat(fakeDeviceFileName) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("given name equals top dir", func(t *testing.T) { + g := NewWithT(t) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, ignore.Empty()) + g.Expect(w.walk(chartPath+"/", chartPath, nil, nil)).To(BeNil()) + }) + + t.Run("given error is returned", func(t *testing.T) { + g := NewWithT(t) + + err := errors.New("error argument") + got := (&secureFileWalker{}).walk("name", "/name", nil, err) + g.Expect(got).To(HaveOccurred()) + g.Expect(got).To(Equal(err)) + }) + + t.Run("ignore rule matches dir", func(t *testing.T) { + g := NewWithT(t) + + rules, err := ignore.Parse(strings.NewReader(fakeDirName + "/")) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, rules) + g.Expect(w.walk(filepath.Join(w.absChartPath, fakeDirName), filepath.Join(w.absChartPath, fakeDirName), fakeDirInfo, nil)).To(Equal(fs.SkipDir)) + }) + + t.Run("absolute path match ignored", func(t *testing.T) { + g := NewWithT(t) + + rules, err := ignore.Parse(strings.NewReader(fakeDirName + "/")) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, rules) + g.Expect(w.walk(filepath.Join(w.absChartPath, "symlink"), filepath.Join(w.absChartPath, fakeDirName), fakeDirInfo, nil)).To(BeNil()) + }) + + t.Run("ignore rule not applicable to dir", func(t *testing.T) { + g := NewWithT(t) + + w := 
newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, ignore.Empty()) + g.Expect(w.walk(filepath.Join(w.absChartPath, fakeDirName), filepath.Join(w.absChartPath, fakeDirName), fakeDirInfo, nil)).To(BeNil()) + }) + + t.Run("absolute path outside root", func(t *testing.T) { + g := NewWithT(t) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, ignore.Empty()) + err := w.walk(filepath.Join(w.absChartPath, fakeDirName), filepath.Join("/fake/another/root/", fakeDirName), fakeDirInfo, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("cannot load 'fake-dir' directory: absolute path traverses outside root boundary")) + }) + + t.Run("dir ignore rules before secure path check", func(t *testing.T) { + g := NewWithT(t) + + rules, err := ignore.Parse(strings.NewReader(fakeDirName + "/")) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, rules) + g.Expect(w.walk(filepath.Join(w.absChartPath, fakeDirName), filepath.Join("/fake/another/root/", fakeDirName), fakeDirInfo, nil)).To(Equal(fs.SkipDir)) + }) + + t.Run("ignore rule matches file", func(t *testing.T) { + g := NewWithT(t) + + rules, err := ignore.Parse(strings.NewReader(fakeFileName)) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, rules) + g.Expect(w.walk(filepath.Join(w.absChartPath, fakeFileName), filepath.Join(w.absChartPath, fakeFileName), fakeFileInfo, nil)).To(BeNil()) + }) + + t.Run("file path outside root", func(t *testing.T) { + g := NewWithT(t) + + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, ignore.Empty()) + err := w.walk(filepath.Join(w.absChartPath, fakeFileName), filepath.Join("/fake/another/root/", fakeFileName), fakeFileInfo, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("cannot load 'fake-file' file: absolute path traverses outside root boundary")) + }) + + t.Run("irregular 
file", func(t *testing.T) { + w := newSecureFileWalker(root, chartPath, helm.MaxChartFileSize, ignore.Empty()) + err := w.walk(fakeDeviceFileName, filepath.Join(w.absChartPath), fakeDeviceInfo, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("cannot load irregular file fake-device as it has file mode type bits set")) + }) + + t.Run("file exceeds max size", func(t *testing.T) { + w := newSecureFileWalker(root, chartPath, 5, ignore.Empty()) + err := w.walk(fakeFileName, filepath.Join(w.absChartPath), fakeFileInfo, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("cannot load file fake-file as file size (%d) exceeds limit (%d)", fakeFileInfo.Size(), w.maxSize))) + }) + + t.Run("file is appended", func(t *testing.T) { + g := NewWithT(t) + tmpDir := t.TempDir() + + fileName := "append-file" + fileData := []byte("append-file-data") + absFilePath := filepath.Join(tmpDir, fileName) + g.Expect(os.WriteFile(absFilePath, fileData, 0o640)).To(Succeed()) + fileInfo, err := os.Lstat(absFilePath) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(tmpDir, tmpDir, helm.MaxChartFileSize, ignore.Empty()) + g.Expect(w.walk(fileName, absFilePath, fileInfo, nil)).To(Succeed()) + g.Expect(w.files).To(HaveLen(1)) + g.Expect(w.files[0].Name).To(Equal(fileName)) + g.Expect(w.files[0].Data).To(Equal(fileData)) + }) + + t.Run("utf8bom is removed from file data", func(t *testing.T) { + g := NewWithT(t) + tmpDir := t.TempDir() + + fileName := "append-file" + fileData := []byte("append-file-data") + fileDataWithBom := append(utf8bom, fileData...) 
+ absFilePath := filepath.Join(tmpDir, fileName) + g.Expect(os.WriteFile(absFilePath, fileDataWithBom, 0o640)).To(Succeed()) + fileInfo, err := os.Lstat(absFilePath) + g.Expect(err).ToNot(HaveOccurred()) + + w := newSecureFileWalker(tmpDir, tmpDir, helm.MaxChartFileSize, ignore.Empty()) + g.Expect(w.walk(fileName, absFilePath, fileInfo, nil)).To(Succeed()) + g.Expect(w.files).To(HaveLen(1)) + g.Expect(w.files[0].Name).To(Equal(fileName)) + g.Expect(w.files[0].Data).To(Equal(fileData)) + }) + + t.Run("file does not exist", func(t *testing.T) { + g := NewWithT(t) + tmpDir := t.TempDir() + + w := newSecureFileWalker(tmpDir, tmpDir, helm.MaxChartFileSize, ignore.Empty()) + err := w.walk(filepath.Join(w.absChartPath, "invalid"), filepath.Join(w.absChartPath, "invalid"), fakeFileInfo, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, fs.ErrNotExist)).To(BeTrue()) + g.Expect(err.Error()).To(ContainSubstring("error reading invalid: open /invalid: no such file or directory")) + }) +} + +func Test_isSecureAbsolutePath(t *testing.T) { + tests := []struct { + name string + root string + absPath string + safe bool + wantErr string + }{ + { + name: "absolute path in root", + root: "/", + absPath: "/bar/", + safe: true, + }, + + { + name: "abs path not relative to root", + root: "/working/dir", + absPath: "/working/in/another/dir", + safe: false, + wantErr: "absolute path traverses outside root boundary", + }, + { + name: "abs path relative to root", + root: "/working/dir/", + absPath: "/working/dir/path", + safe: true, + }, + { + name: "illegal abs path", + root: "/working/dir", + absPath: "/working/dir/../but/not/really", + safe: false, + wantErr: "absolute path traverses outside root boundary", + }, + { + name: "illegal root", + root: "working/dir/", + absPath: "/working/dir", + safe: false, + wantErr: "cannot calculate path relative to root for absolute path", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + 
got, err := isSecureAbsolutePath(tt.root, tt.absPath) + g.Expect(got).To(Equal(tt.safe)) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} diff --git a/internal/helm/chart/secureloader/file.go b/internal/helm/chart/secureloader/file.go new file mode 100644 index 000000000..ce42e4ed2 --- /dev/null +++ b/internal/helm/chart/secureloader/file.go @@ -0,0 +1,47 @@ +/* +Copyright The Helm Authors. +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secureloader + +import ( + "io" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// FileLoader is equal to Helm's. +// Redeclared to avoid having to deal with multiple package imports, +// possibly resulting in using the non-secure directory loader. +type FileLoader = loader.FileLoader + +// LoadFile loads from an archive file. +func LoadFile(name string) (*chart.Chart, error) { + return loader.LoadFile(name) +} + +// LoadArchiveFiles reads in files out of an archive into memory. This function +// performs important path security checks and should always be used before +// expanding a tarball +func LoadArchiveFiles(in io.Reader) ([]*loader.BufferedFile, error) { + return loader.LoadArchiveFiles(in) +} + +// LoadArchive loads from a reader containing a compressed tar archive. 
+func LoadArchive(in io.Reader) (*chart.Chart, error) { + return loader.LoadArchive(in) +} diff --git a/internal/helm/chart/secureloader/ignore/doc.go b/internal/helm/chart/secureloader/ignore/doc.go new file mode 100644 index 000000000..16c9a79e8 --- /dev/null +++ b/internal/helm/chart/secureloader/ignore/doc.go @@ -0,0 +1,68 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ignore provides tools for writing ignore files (a la .gitignore). + +This provides both an ignore parser and a file-aware processor. + +The format of ignore files closely follows, but does not exactly match, the +format for .gitignore files (https://git-scm.com/docs/gitignore). + +The formatting rules are as follows: + + - Parsing is line-by-line + - Empty lines are ignored + - Lines the begin with # (comments) will be ignored + - Leading and trailing spaces are always ignored + - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) + - There is no support for multi-line patterns + - Shell glob patterns are supported. See Go's "path/filepath".Match + - If a pattern begins with a leading !, the match will be negated. + - If a pattern begins with a leading /, only paths relatively rooted will match. 
+ - If the pattern ends with a trailing /, only directories will match + - If a pattern contains no slashes, file basenames are tested (not paths) + - The pattern sequence "**", while legal in a glob, will cause an error here + (to indicate incompatibility with .gitignore). + +Example: + + # Match any file named foo.txt + foo.txt + + # Match any text file + *.txt + + # Match only directories named mydir + mydir/ + + # Match only text files in the top-level directory + /*.txt + + # Match only the file foo.txt in the top-level directory + /foo.txt + + # Match any file named ab.txt, ac.txt, or ad.txt + a[b-d].txt + +Notable differences from .gitignore: + - The '**' syntax is not supported. + - The globbing library is Go's 'filepath.Match', not fnmatch(3) + - Trailing spaces are always ignored (there is no supported escape sequence) + - The evaluation of escape sequences has not been tested for compatibility + - There is no support for '\!' as a special leading sequence. +*/ +package ignore diff --git a/internal/helm/chart/secureloader/ignore/rules.go b/internal/helm/chart/secureloader/ignore/rules.go new file mode 100644 index 000000000..d8054b44d --- /dev/null +++ b/internal/helm/chart/secureloader/ignore/rules.go @@ -0,0 +1,227 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ignore + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" + "os" + "path/filepath" + "strings" +) + +// HelmIgnore default name of an ignorefile. 
+const HelmIgnore = ".helmignore" + +// Rules is a collection of path matching rules. +// +// Parse() and ParseFile() will construct and populate new Rules. +// Empty() will create an immutable empty ruleset. +type Rules struct { + patterns []*pattern +} + +// Empty builds an empty ruleset. +func Empty() *Rules { + return &Rules{patterns: []*pattern{}} +} + +// AddDefaults adds default ignore patterns. +// +// Ignore all dotfiles in "templates/" +func (r *Rules) AddDefaults() { + r.parseRule(`templates/.?*`) +} + +// ParseFile parses a helmignore file and returns the *Rules. +func ParseFile(file string) (*Rules, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + return Parse(f) +} + +// Parse parses a rules file +func Parse(file io.Reader) (*Rules, error) { + r := &Rules{patterns: []*pattern{}} + + s := bufio.NewScanner(file) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for s.Scan() { + scannedBytes := s.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + line := string(scannedBytes) + currentLine++ + + if err := r.parseRule(line); err != nil { + return r, err + } + } + return r, s.Err() +} + +// Ignore evaluates the file at the given path, and returns true if it should be ignored. +// +// Ignore evaluates path against the rules in order. Evaluation stops when a match +// is found. Matching a negative rule will stop evaluation. +func (r *Rules) Ignore(path string, fi os.FileInfo) bool { + // Don't match on empty dirs. + if path == "" { + return false + } + + // Disallow ignoring the current working directory. + // See issue: + // 1776 (New York City) Hamilton: "Pardon me, are you Aaron Burr, sir?" + if path == "." 
|| path == "./" { + return false + } + for _, p := range r.patterns { + if p.match == nil { + log.Printf("ignore: no matcher supplied for %q", p.raw) + return false + } + + // For negative rules, we need to capture and return non-matches, + // and continue for matches. + if p.negate { + if p.mustDir && !fi.IsDir() { + return true + } + if !p.match(path, fi) { + return true + } + continue + } + + // If the rule is looking for directories, and this is not a directory, + // skip it. + if p.mustDir && !fi.IsDir() { + continue + } + if p.match(path, fi) { + return true + } + } + return false +} + +// parseRule parses a rule string and creates a pattern, which is then stored in the Rules object. +func (r *Rules) parseRule(rule string) error { + rule = strings.TrimSpace(rule) + + // Ignore blank lines + if rule == "" { + return nil + } + // Comment + if strings.HasPrefix(rule, "#") { + return nil + } + + // Fail any rules that contain ** + if strings.Contains(rule, "**") { + return errors.New("double-star (**) syntax is not supported") + } + + // Fail any patterns that can't compile. A non-empty string must be + // given to Match() to avoid optimization that skips rule evaluation. + if _, err := filepath.Match(rule, "abc"); err != nil { + return err + } + + p := &pattern{raw: rule} + + // Negation is handled at a higher level, so strip the leading ! from the + // string. + if strings.HasPrefix(rule, "!") { + p.negate = true + rule = rule[1:] + } + + // Directory verification is handled by a higher level, so the trailing / + // is removed from the rule. That way, a directory named "foo" matches, + // even if the supplied string does not contain a literal slash character. + if strings.HasSuffix(rule, "/") { + p.mustDir = true + rule = strings.TrimSuffix(rule, "/") + } + + if strings.HasPrefix(rule, "/") { + // Require path matches the root path. 
+ p.match = func(n string, fi os.FileInfo) bool { + rule = strings.TrimPrefix(rule, "/") + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else if strings.Contains(rule, "/") { + // require structural match. + p.match = func(n string, fi os.FileInfo) bool { + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else { + p.match = func(n string, fi os.FileInfo) bool { + // When there is no slash in the pattern, we evaluate ONLY the + // filename. + n = filepath.Base(n) + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } + + r.patterns = append(r.patterns, p) + return nil +} + +// matcher is a function capable of computing a match. +// +// It returns true if the rule matches. +type matcher func(name string, fi os.FileInfo) bool + +// pattern describes a pattern to be matched in a rule set. +type pattern struct { + // raw is the unparsed string, with nothing stripped. + raw string + // match is the matcher function. + match matcher + // negate indicates that the rule's outcome should be negated. + negate bool + // mustDir indicates that the matched file must be a directory. + mustDir bool +} diff --git a/internal/helm/chart/secureloader/ignore/rules_test.go b/internal/helm/chart/secureloader/ignore/rules_test.go new file mode 100644 index 000000000..9581cf09f --- /dev/null +++ b/internal/helm/chart/secureloader/ignore/rules_test.go @@ -0,0 +1,155 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ignore + +import ( + "bytes" + "os" + "path/filepath" + "testing" +) + +var testdata = "./testdata" + +func TestParse(t *testing.T) { + rules := `#ignore + + #ignore +foo +bar/* +baz/bar/foo.txt + +one/more +` + r, err := parseString(rules) + if err != nil { + t.Fatalf("Error parsing rules: %s", err) + } + + if len(r.patterns) != 4 { + t.Errorf("Expected 4 rules, got %d", len(r.patterns)) + } + + expects := []string{"foo", "bar/*", "baz/bar/foo.txt", "one/more"} + for i, p := range r.patterns { + if p.raw != expects[i] { + t.Errorf("Expected %q, got %q", expects[i], p.raw) + } + if p.match == nil { + t.Errorf("Expected %s to have a matcher function.", p.raw) + } + } +} + +func TestParseFail(t *testing.T) { + shouldFail := []string{"foo/**/bar", "[z-"} + for _, fail := range shouldFail { + _, err := parseString(fail) + if err == nil { + t.Errorf("Rule %q should have failed", fail) + } + } +} + +func TestParseFile(t *testing.T) { + f := filepath.Join(testdata, HelmIgnore) + if _, err := os.Stat(f); err != nil { + t.Fatalf("Fixture %s missing: %s", f, err) + } + + r, err := ParseFile(f) + if err != nil { + t.Fatalf("Failed to parse rules file: %s", err) + } + + if len(r.patterns) != 3 { + t.Errorf("Expected 3 patterns, got %d", len(r.patterns)) + } +} + +func TestIgnore(t *testing.T) { + // Test table: Given pattern and name, Ignore should return expect. 
+ tests := []struct { + pattern string + name string + expect bool + }{ + // Glob tests + {`helm.txt`, "helm.txt", true}, + {`helm.*`, "helm.txt", true}, + {`helm.*`, "rudder.txt", false}, + {`*.txt`, "tiller.txt", true}, + {`*.txt`, "cargo/a.txt", true}, + {`cargo/*.txt`, "cargo/a.txt", true}, + {`cargo/*.*`, "cargo/a.txt", true}, + {`cargo/*.txt`, "mast/a.txt", false}, + {`ru[c-e]?er.txt`, "rudder.txt", true}, + {`templates/.?*`, "templates/.dotfile", true}, + // "." should never get ignored. https://github.com/helm/helm/issues/1776 + {`.*`, ".", false}, + {`.*`, "./", false}, + {`.*`, ".joonix", true}, + {`.*`, "helm.txt", false}, + {`.*`, "", false}, + + // Directory tests + {`cargo/`, "cargo", true}, + {`cargo/`, "cargo/", true}, + {`cargo/`, "mast/", false}, + {`helm.txt/`, "helm.txt", false}, + + // Negation tests + {`!helm.txt`, "helm.txt", false}, + {`!helm.txt`, "tiller.txt", true}, + {`!*.txt`, "cargo", true}, + {`!cargo/`, "mast/", true}, + + // Absolute path tests + {`/a.txt`, "a.txt", true}, + {`/a.txt`, "cargo/a.txt", false}, + {`/cargo/a.txt`, "cargo/a.txt", true}, + } + + for _, test := range tests { + r, err := parseString(test.pattern) + if err != nil { + t.Fatalf("Failed to parse: %s", err) + } + fi, err := os.Stat(filepath.Join(testdata, test.name)) + if err != nil { + t.Fatalf("Fixture missing: %s", err) + } + + if r.Ignore(test.name, fi) != test.expect { + t.Errorf("Expected %q to be %v for pattern %q", test.name, test.expect, test.pattern) + } + } +} + +func TestAddDefaults(t *testing.T) { + r := Rules{} + r.AddDefaults() + + if len(r.patterns) != 1 { + t.Errorf("Expected 1 default patterns, got %d", len(r.patterns)) + } +} + +func parseString(str string) (*Rules, error) { + b := bytes.NewBuffer([]byte(str)) + return Parse(b) +} diff --git a/internal/helm/chart/secureloader/ignore/testdata/.helmignore b/internal/helm/chart/secureloader/ignore/testdata/.helmignore new file mode 100644 index 000000000..b2693bae7 --- /dev/null +++ 
b/internal/helm/chart/secureloader/ignore/testdata/.helmignore @@ -0,0 +1,3 @@ +mast/a.txt +.DS_Store +.git diff --git a/internal/helm/chart/secureloader/ignore/testdata/.joonix b/internal/helm/chart/secureloader/ignore/testdata/.joonix new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/a.txt b/internal/helm/chart/secureloader/ignore/testdata/a.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/cargo/a.txt b/internal/helm/chart/secureloader/ignore/testdata/cargo/a.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/cargo/b.txt b/internal/helm/chart/secureloader/ignore/testdata/cargo/b.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/cargo/c.txt b/internal/helm/chart/secureloader/ignore/testdata/cargo/c.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/helm.txt b/internal/helm/chart/secureloader/ignore/testdata/helm.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/mast/a.txt b/internal/helm/chart/secureloader/ignore/testdata/mast/a.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/mast/b.txt b/internal/helm/chart/secureloader/ignore/testdata/mast/b.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/mast/c.txt b/internal/helm/chart/secureloader/ignore/testdata/mast/c.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/rudder.txt b/internal/helm/chart/secureloader/ignore/testdata/rudder.txt new file mode 100644 index 000000000..e69de29bb diff --git 
a/internal/helm/chart/secureloader/ignore/testdata/templates/.dotfile b/internal/helm/chart/secureloader/ignore/testdata/templates/.dotfile new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/ignore/testdata/tiller.txt b/internal/helm/chart/secureloader/ignore/testdata/tiller.txt new file mode 100644 index 000000000..e69de29bb diff --git a/internal/helm/chart/secureloader/loader.go b/internal/helm/chart/secureloader/loader.go new file mode 100644 index 000000000..e17adc314 --- /dev/null +++ b/internal/helm/chart/secureloader/loader.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secureloader + +import ( + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + + securejoin "github.com/cyphar/filepath-securejoin" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + + "github.com/fluxcd/source-controller/internal/helm" +) + +// Loader returns a new loader.ChartLoader appropriate for the given chart +// name. That being, SecureDirLoader when name is a directory, and +// FileLoader when it's a file. +// Name can be an absolute or relative path, but always has to be inside +// root. 
+func Loader(root, name string) (loader.ChartLoader, error) { + root, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + relName := filepath.Clean(name) + if filepath.IsAbs(relName) { + var err error + if relName, err = filepath.Rel(root, name); err != nil { + return nil, err + } + } + + secureName, err := securejoin.SecureJoin(root, relName) + if err != nil { + return nil, err + } + fi, err := os.Lstat(secureName) + if err != nil { + if pathErr := new(fs.PathError); errors.As(err, &pathErr) { + return nil, &fs.PathError{Op: pathErr.Op, Path: strings.TrimPrefix(secureName, root), Err: pathErr.Err} + } + return nil, err + } + + if fi.IsDir() { + return NewSecureDirLoader(root, relName, helm.MaxChartFileSize), nil + } + return FileLoader(secureName), nil +} + +// Load takes a string root and name, tries to resolve it to a file or directory, +// and then loads it securely without traversing outside of root. +// +// This is the preferred way to load a chart. It will discover the chart encoding +// and hand off to the appropriate chart reader. +// +// If a .helmignore file is present, the directory loader will skip loading any files +// matching it. But .helmignore is not evaluated when reading out of an archive. +func Load(root, name string) (*chart.Chart, error) { + l, err := Loader(root, name) + if err != nil { + return nil, err + } + return l.Load() +} diff --git a/internal/helm/chart/secureloader/loader_test.go b/internal/helm/chart/secureloader/loader_test.go new file mode 100644 index 000000000..af7de550e --- /dev/null +++ b/internal/helm/chart/secureloader/loader_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secureloader + +import ( + "io/fs" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/source-controller/internal/helm" +) + +func TestLoader(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + fakeChart := filepath.Join(tmpDir, "fake.tgz") + g.Expect(os.WriteFile(fakeChart, []byte(""), 0o640)).To(Succeed()) + + t.Run("file loader", func(t *testing.T) { + g := NewWithT(t) + + got, err := Loader(tmpDir, fakeChart) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(loader.FileLoader(fakeChart))) + }) + + t.Run("dir loader", func(t *testing.T) { + g := NewWithT(t) + + fakeChartPath := filepath.Join(tmpDir, "fake") + g.Expect(os.Mkdir(fakeChartPath, 0o700)).To(Succeed()) + + got, err := Loader(tmpDir, "fake") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(SecureDirLoader{root: tmpDir, path: "fake", maxSize: helm.MaxChartFileSize})) + }) + + t.Run("illegal path", func(t *testing.T) { + g := NewWithT(t) + + symlinkRoot := filepath.Join(tmpDir, "symlink") + g.Expect(os.Mkdir(symlinkRoot, 0o700)).To(Succeed()) + symlinkPath := filepath.Join(symlinkRoot, "fake.tgz") + g.Expect(os.Symlink(fakeChart, symlinkPath)) + + got, err := Loader(symlinkRoot, symlinkPath) + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(BeAssignableToTypeOf(&fs.PathError{})) + g.Expect(got).To(BeNil()) + }) +} + +func TestLoad(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + metadata 
:= chart.Metadata{ + Name: "test", + APIVersion: "v2", + Version: "1.0", + Type: "application", + } + b, err := yaml.Marshal(&metadata) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(os.WriteFile(filepath.Join(tmpDir, "Chart.yaml"), b, 0o640)).To(Succeed()) + + got, err := Load(tmpDir, "") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got.Name()).To(Equal(metadata.Name)) +} diff --git a/internal/helm/chart/secureloader/sympath/walk.go b/internal/helm/chart/secureloader/sympath/walk.go new file mode 100644 index 000000000..a9763c56a --- /dev/null +++ b/internal/helm/chart/secureloader/sympath/walk.go @@ -0,0 +1,124 @@ +/* +Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are +provided under the BSD license. + +https://github.com/golang/go/blob/master/LICENSE + +Copyright The Helm Authors. +Copyright The Flux authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sympath + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" +) + +// AbsWalkFunc functions like filepath.WalkFunc but provides the absolute path +// of fs.FileInfo when path is a symlink. +type AbsWalkFunc func(path, absPath string, info fs.FileInfo, err error) error + +// Walk walks the file tree rooted at root, calling walkFn for each file or directory +// in the tree, including root. All errors that arise visiting files and directories +// are filtered by walkFn. 
The files are walked in lexical order, which makes the +// output deterministic but means that for very large directories Walk can be +// inefficient. Walk follows symbolic links. +func Walk(root string, walkFn AbsWalkFunc) error { + info, err := os.Lstat(root) + if err != nil { + err = walkFn(root, root, nil, err) + } else { + err = symwalk(root, root, info, walkFn) + } + if err == filepath.SkipDir { + return nil + } + return err +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func readDirNames(dirname string) ([]string, error) { + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// symwalk recursively descends path, calling AbsWalkFunc. +func symwalk(path, absPath string, info os.FileInfo, walkFn AbsWalkFunc) error { + // Recursively walk symlinked directories. + if IsSymlink(info) { + resolved, err := filepath.EvalSymlinks(path) + if err != nil { + return fmt.Errorf("error evaluating symlink %s: %w", path, err) + } + if info, err = os.Lstat(resolved); err != nil { + return err + } + // NB: pass-on resolved as absolute path + if err := symwalk(path, resolved, info, walkFn); err != nil && err != filepath.SkipDir { + return err + } + return nil + } + + if err := walkFn(path, absPath, info, nil); err != nil { + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(path) + if err != nil { + return walkFn(path, absPath, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + // NB: possibly absPath != path separately + absFilename := filepath.Join(absPath, name) + fileInfo, err := os.Lstat(filename) + if err != nil { + if err := walkFn(filename, absFilename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + if err = symwalk(filename, absFilename, 
fileInfo, walkFn); err != nil { + if (!fileInfo.IsDir() && !IsSymlink(fileInfo)) || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// IsSymlink is used to determine if the fileinfo is a symbolic link. +func IsSymlink(fi os.FileInfo) bool { + return fi.Mode()&os.ModeSymlink != 0 +} diff --git a/internal/helm/chart/secureloader/sympath/walk_test.go b/internal/helm/chart/secureloader/sympath/walk_test.go new file mode 100644 index 000000000..50740f34e --- /dev/null +++ b/internal/helm/chart/secureloader/sympath/walk_test.go @@ -0,0 +1,160 @@ +/* +Copyright (c) for portions of walk_test.go are held by The Go Authors, 2009 and are +provided under the BSD license. + +https://github.com/golang/go/blob/master/LICENSE + +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sympath + +import ( + "os" + "path/filepath" + "testing" +) + +type Node struct { + name string + entries []*Node // nil if the entry is a file + marks int + expectedMarks int + symLinkedTo string + absPath string + expectedAbsPath string +} + +var tree = &Node{ + "testdata", + []*Node{ + {"a", nil, 0, 1, "", "", "testdata/a"}, + {"b", []*Node{}, 0, 1, "", "", "testdata/b"}, + {"c", nil, 0, 2, "", "", "testdata/c"}, + {"d", nil, 0, 0, "c", "", "testdata/c"}, + { + "e", + []*Node{ + {"x", nil, 0, 1, "", "", "testdata/e/x"}, + {"y", []*Node{}, 0, 1, "", "", "testdata/e/y"}, + { + "z", + []*Node{ + {"u", nil, 0, 1, "", "", "testdata/e/z/u"}, + {"v", nil, 0, 1, "", "", "testdata/e/z/v"}, + {"w", nil, 0, 1, "", "", "testdata/e/z/w"}, + }, + 0, + 1, + "", "", "testdata/e/z", + }, + }, + 0, + 1, + "", "", "testdata/e", + }, + }, + 0, + 1, + "", "", "testdata", +} + +func walkTree(n *Node, path string, f func(path string, n *Node)) { + f(path, n) + for _, e := range n.entries { + walkTree(e, filepath.Join(path, e.name), f) + } +} + +func makeTree(t *testing.T) { + walkTree(tree, tree.name, func(path string, n *Node) { + if n.entries == nil { + if n.symLinkedTo != "" { + if err := os.Symlink(n.symLinkedTo, path); err != nil { + t.Fatalf("makeTree: %v", err) + } + } else { + fd, err := os.Create(path) + if err != nil { + t.Fatalf("makeTree: %v", err) + return + } + fd.Close() + } + } else { + if err := os.Mkdir(path, 0770); err != nil { + t.Fatalf("makeTree: %v", err) + } + } + }) +} + +func checkMarks(t *testing.T, report bool) { + walkTree(tree, tree.name, func(path string, n *Node) { + if n.marks != n.expectedMarks && report { + t.Errorf("node %s mark = %d; expected %d", path, n.marks, n.expectedMarks) + } + if n.absPath != n.expectedAbsPath && report { + t.Errorf("node %s absPath = %s; expected %s", path, n.absPath, n.expectedAbsPath) + } + n.marks = 0 + }) +} + +// Assumes that each node name is unique. Good enough for a test. 
+// If clear is true, any incoming error is cleared before return. The errors +// are always accumulated, though. +func mark(absPath string, info os.FileInfo, err error, errors *[]error, clear bool) error { + if err != nil { + *errors = append(*errors, err) + if clear { + return nil + } + return err + } + name := info.Name() + walkTree(tree, tree.name, func(path string, n *Node) { + if n.symLinkedTo == name { + n.absPath = absPath + } + if n.name == name { + n.marks++ + n.absPath = absPath + } + }) + return nil +} + +func TestWalk(t *testing.T) { + makeTree(t) + errors := make([]error, 0, 10) + clear := true + markFn := func(path, absPath string, info os.FileInfo, err error) error { + return mark(absPath, info, err, &errors, clear) + } + // Expect no errors. + err := Walk(tree.name, markFn) + if err != nil { + t.Fatalf("no error expected, found: %s", err) + } + if len(errors) != 0 { + t.Fatalf("unexpected errors: %s", errors) + } + checkMarks(t, true) + + // cleanup + if err := os.RemoveAll(tree.name); err != nil { + t.Errorf("removeTree: %v", err) + } +} diff --git a/internal/helm/chart_test.go b/internal/helm/chart_test.go deleted file mode 100644 index c0b3e8c58..000000000 --- a/internal/helm/chart_test.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helm - -import ( - "reflect" - "testing" - - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chartutil" -) - -var ( - originalValuesFixture []byte = []byte("override: original") - chartFilesFixture []*helmchart.File = []*helmchart.File{ - { - Name: "values.yaml", - Data: originalValuesFixture, - }, - } - chartFixture helmchart.Chart = helmchart.Chart{ - Metadata: &helmchart.Metadata{ - Name: "test", - Version: "0.1.0", - }, - Raw: chartFilesFixture, - Files: chartFilesFixture, - } -) - -func TestOverwriteChartDefaultValues(t *testing.T) { - invalidChartFixture := chartFixture - invalidChartFixture.Raw = []*helmchart.File{} - invalidChartFixture.Files = []*helmchart.File{} - - testCases := []struct { - desc string - chart helmchart.Chart - data []byte - ok bool - expectErr bool - }{ - { - desc: "invalid chart", - chart: invalidChartFixture, - data: originalValuesFixture, - expectErr: true, - }, - { - desc: "identical override", - chart: chartFixture, - data: originalValuesFixture, - }, - { - desc: "valid override", - chart: chartFixture, - ok: true, - data: []byte("override: test"), - }, - { - desc: "empty override", - chart: chartFixture, - ok: true, - data: []byte(""), - }, - { - desc: "invalid", - chart: chartFixture, - data: []byte("!fail:"), - expectErr: true, - }, - } - for _, tt := range testCases { - t.Run(tt.desc, func(t *testing.T) { - fixture := tt.chart - ok, err := OverwriteChartDefaultValues(&fixture, tt.data) - if ok != tt.ok { - t.Fatalf("should return %v, returned %v", tt.ok, ok) - } - if err != nil && !tt.expectErr { - t.Fatalf("returned unexpected error: %v", err) - } - if err == nil && tt.expectErr { - t.Fatal("expected error") - } - - for _, f := range fixture.Raw { - if f.Name == chartutil.ValuesfileName && reflect.DeepEqual(f.Data, originalValuesFixture) && tt.ok { - t.Error("should override values.yaml in Raw field") - } - } - for _, f := range fixture.Files { - if f.Name == chartutil.ValuesfileName && 
reflect.DeepEqual(f.Data, originalValuesFixture) && tt.ok { - t.Error("should override values.yaml in Files field") - } - } - }) - } -} diff --git a/internal/helm/common/string_resource.go b/internal/helm/common/string_resource.go new file mode 100644 index 000000000..b4cdada9f --- /dev/null +++ b/internal/helm/common/string_resource.go @@ -0,0 +1,39 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import "strings" + +// StringResource is there to satisfy the github.com/google/go-containerregistry/pkg/authn.Resource interface. +// It merely wraps a given string and returns it for all of the interface's methods. +type StringResource struct { + Registry string +} + +// String returns a string representation of the StringResource. +// It converts the StringResource object to a string. +// The returned string contains the value of the StringResource. +func (r StringResource) String() string { + return r.Registry +} + +// RegistryStr returns the string representation of the registry resource. +// It converts the StringResource object to a string that represents the registry resource. +// The returned string can be used to interact with the registry resource. 
+func (r StringResource) RegistryStr() string { + return strings.Split(r.Registry, "/")[0] +} diff --git a/internal/helm/dependency_manager.go b/internal/helm/dependency_manager.go deleted file mode 100644 index b6e9b982b..000000000 --- a/internal/helm/dependency_manager.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/Masterminds/semver/v3" - securejoin "github.com/cyphar/filepath-securejoin" - "golang.org/x/sync/errgroup" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" -) - -// DependencyWithRepository is a container for a Helm chart dependency -// and its respective repository. -type DependencyWithRepository struct { - // Dependency holds the reference to a chart.Chart dependency. - Dependency *helmchart.Dependency - // Repository is the ChartRepository the dependency should be - // available at and can be downloaded from. If there is none, - // a local ('file://') dependency is assumed. - Repository *ChartRepository -} - -// DependencyManager manages dependencies for a Helm chart. -type DependencyManager struct { - // WorkingDir is the chroot path for dependency manager operations, - // Dependencies that hold a local (relative) path reference are not - // allowed to traverse outside this directory. 
- WorkingDir string - // ChartPath is the path of the Chart relative to the WorkingDir, - // the combination of the WorkingDir and ChartPath is used to - // determine the absolute path of a local dependency. - ChartPath string - // Chart holds the loaded chart.Chart from the ChartPath. - Chart *helmchart.Chart - // Dependencies contains a list of dependencies, and the respective - // repository the dependency can be found at. - Dependencies []*DependencyWithRepository -} - -// Build compiles and builds the dependencies of the Chart. -func (dm *DependencyManager) Build(ctx context.Context) error { - if len(dm.Dependencies) == 0 { - return nil - } - - errs, ctx := errgroup.WithContext(ctx) - for _, item := range dm.Dependencies { - errs.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var err error - switch item.Repository { - case nil: - err = dm.addLocalDependency(item) - default: - err = dm.addRemoteDependency(item) - } - return err - }) - } - - return errs.Wait() -} - -func (dm *DependencyManager) addLocalDependency(dpr *DependencyWithRepository) error { - sLocalChartPath, err := dm.secureLocalChartPath(dpr) - if err != nil { - return err - } - - if _, err := os.Stat(sLocalChartPath); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("no chart found at '%s' (reference '%s') for dependency '%s'", - strings.TrimPrefix(sLocalChartPath, dm.WorkingDir), dpr.Dependency.Repository, dpr.Dependency.Name) - } - return err - } - - ch, err := loader.Load(sLocalChartPath) - if err != nil { - return err - } - - constraint, err := semver.NewConstraint(dpr.Dependency.Version) - if err != nil { - err := fmt.Errorf("dependency '%s' has an invalid version/constraint format: %w", dpr.Dependency.Name, err) - return err - } - - v, err := semver.NewVersion(ch.Metadata.Version) - if err != nil { - return err - } - - if !constraint.Check(v) { - err = fmt.Errorf("can't get a valid version for dependency '%s'", dpr.Dependency.Name) - return 
err - } - - dm.Chart.AddDependency(ch) - return nil -} - -func (dm *DependencyManager) addRemoteDependency(dpr *DependencyWithRepository) error { - if dpr.Repository == nil { - return fmt.Errorf("no ChartRepository given for '%s' dependency", dpr.Dependency.Name) - } - - chartVer, err := dpr.Repository.Get(dpr.Dependency.Name, dpr.Dependency.Version) - if err != nil { - return err - } - - res, err := dpr.Repository.DownloadChart(chartVer) - if err != nil { - return err - } - - ch, err := loader.LoadArchive(res) - if err != nil { - return err - } - - dm.Chart.AddDependency(ch) - return nil -} - -func (dm *DependencyManager) secureLocalChartPath(dep *DependencyWithRepository) (string, error) { - localUrl, err := url.Parse(dep.Dependency.Repository) - if err != nil { - return "", fmt.Errorf("failed to parse alleged local chart reference: %w", err) - } - if localUrl.Scheme != "file" { - return "", fmt.Errorf("'%s' is not a local chart reference", dep.Dependency.Repository) - } - return securejoin.SecureJoin(dm.WorkingDir, filepath.Join(dm.ChartPath, localUrl.Host, localUrl.Path)) -} diff --git a/internal/helm/dependency_manager_test.go b/internal/helm/dependency_manager_test.go deleted file mode 100644 index a66977751..000000000 --- a/internal/helm/dependency_manager_test.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helm - -import ( - "context" - "fmt" - "io/ioutil" - "strings" - "testing" - - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/repo" -) - -var ( - helmPackageFile = "testdata/charts/helmchart-0.1.0.tgz" - - chartName = "helmchart" - chartVersion = "0.1.0" - chartLocalRepository = "file://../helmchart" - remoteDepFixture = helmchart.Dependency{ - Name: chartName, - Version: chartVersion, - Repository: "https://example.com/charts", - } -) - -func TestBuild_WithEmptyDependencies(t *testing.T) { - dm := DependencyManager{ - Dependencies: nil, - } - if err := dm.Build(context.TODO()); err != nil { - t.Errorf("Build() should return nil") - } -} - -func TestBuild_WithLocalChart(t *testing.T) { - tests := []struct { - name string - dep helmchart.Dependency - wantErr bool - errMsg string - }{ - { - name: "valid path", - dep: helmchart.Dependency{ - Name: chartName, - Version: chartVersion, - Repository: chartLocalRepository, - }, - }, - { - name: "valid path", - dep: helmchart.Dependency{ - Name: chartName, - Alias: "aliased", - Version: chartVersion, - Repository: chartLocalRepository, - }, - }, - { - name: "allowed traversing path", - dep: helmchart.Dependency{ - Name: chartName, - Alias: "aliased", - Version: chartVersion, - Repository: "file://../../../testdata/charts/helmchartwithdeps/../helmchart", - }, - }, - { - name: "invalid path", - dep: helmchart.Dependency{ - Name: chartName, - Version: chartVersion, - Repository: "file://../invalid", - }, - wantErr: true, - errMsg: "no chart found at", - }, - { - name: "illegal traversing path", - dep: helmchart.Dependency{ - Name: chartName, - Version: chartVersion, - Repository: "file://../../../../../controllers/testdata/charts/helmchart", - }, - wantErr: true, - errMsg: "no chart found at", - }, - { - name: "invalid version constraint format", - dep: helmchart.Dependency{ - Name: chartName, - Version: "!2.0", - Repository: chartLocalRepository, - }, - wantErr: true, - errMsg: "has an invalid 
version/constraint format", - }, - { - name: "invalid version", - dep: helmchart.Dependency{ - Name: chartName, - Version: chartVersion, - Repository: chartLocalRepository, - }, - wantErr: true, - errMsg: "can't get a valid version for dependency", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := chartFixture - dm := DependencyManager{ - WorkingDir: "./", - ChartPath: "testdata/charts/helmchart", - Chart: &c, - Dependencies: []*DependencyWithRepository{ - { - Dependency: &tt.dep, - Repository: nil, - }, - }, - } - - err := dm.Build(context.TODO()) - deps := dm.Chart.Dependencies() - - if (err != nil) && tt.wantErr { - if !strings.Contains(err.Error(), tt.errMsg) { - t.Errorf("Build() expected to return error: %s, got: %s", tt.errMsg, err) - } - if len(deps) > 0 { - t.Fatalf("chart expected to have no dependencies registered") - } - return - } else if err != nil { - t.Errorf("Build() not expected to return an error: %s", err) - return - } - - if len(deps) == 0 { - t.Fatalf("chart expected to have at least one dependency registered") - } - if deps[0].Metadata.Name != chartName { - t.Errorf("chart dependency has incorrect name, expected: %s, got: %s", chartName, deps[0].Metadata.Name) - } - if deps[0].Metadata.Version != chartVersion { - t.Errorf("chart dependency has incorrect version, expected: %s, got: %s", chartVersion, deps[0].Metadata.Version) - } - }) - } -} - -func TestBuild_WithRemoteChart(t *testing.T) { - chart := chartFixture - b, err := ioutil.ReadFile(helmPackageFile) - if err != nil { - t.Fatal(err) - } - i := repo.NewIndexFile() - i.Add(&helmchart.Metadata{Name: chartName, Version: chartVersion}, fmt.Sprintf("%s-%s.tgz", chartName, chartVersion), "http://example.com/charts", "sha256:1234567890") - mg := mockGetter{response: b} - cr := &ChartRepository{ - URL: remoteDepFixture.Repository, - Index: i, - Client: &mg, - } - dm := DependencyManager{ - Chart: &chart, - Dependencies: []*DependencyWithRepository{ - { - 
Dependency: &remoteDepFixture, - Repository: cr, - }, - }, - } - - if err := dm.Build(context.TODO()); err != nil { - t.Errorf("Build() expected to not return error: %s", err) - } - - deps := dm.Chart.Dependencies() - if len(deps) != 1 { - t.Fatalf("chart expected to have one dependency registered") - } - if deps[0].Metadata.Name != chartName { - t.Errorf("chart dependency has incorrect name, expected: %s, got: %s", chartName, deps[0].Metadata.Name) - } - if deps[0].Metadata.Version != chartVersion { - t.Errorf("chart dependency has incorrect version, expected: %s, got: %s", chartVersion, deps[0].Metadata.Version) - } - - // When repo is not set - dm.Dependencies[0].Repository = nil - if err := dm.Build(context.TODO()); err == nil { - t.Errorf("Build() expected to return error") - } else if !strings.Contains(err.Error(), "is not a local chart reference") { - t.Errorf("Build() expected to return different error, got: %s", err) - } -} diff --git a/internal/helm/getter.go b/internal/helm/getter.go deleted file mode 100644 index bc7435c4f..000000000 --- a/internal/helm/getter.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" -) - -// ClientOptionsFromSecret constructs a getter.Option slice for the given secret. -// It returns the slice, and a callback to remove temporary files. 
-func ClientOptionsFromSecret(secret corev1.Secret) ([]getter.Option, func(), error) { - var opts []getter.Option - basicAuth, err := BasicAuthFromSecret(secret) - if err != nil { - return opts, nil, err - } - if basicAuth != nil { - opts = append(opts, basicAuth) - } - tlsClientConfig, cleanup, err := TLSClientConfigFromSecret(secret) - if err != nil { - return opts, nil, err - } - if tlsClientConfig != nil { - opts = append(opts, tlsClientConfig) - } - return opts, cleanup, nil -} - -// BasicAuthFromSecret attempts to construct a basic auth getter.Option for the -// given v1.Secret and returns the result. -// -// Secrets with no username AND password are ignored, if only one is defined it -// returns an error. -func BasicAuthFromSecret(secret corev1.Secret) (getter.Option, error) { - username, password := string(secret.Data["username"]), string(secret.Data["password"]) - switch { - case username == "" && password == "": - return nil, nil - case username == "" || password == "": - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - return getter.WithBasicAuth(username, password), nil -} - -// TLSClientConfigFromSecret attempts to construct a TLS client config -// getter.Option for the given v1.Secret. It returns the getter.Option and a -// callback to remove the temporary TLS files. -// -// Secrets with no certFile, keyFile, AND caFile are ignored, if only a -// certBytes OR keyBytes is defined it returns an error. 
-func TLSClientConfigFromSecret(secret corev1.Secret) (getter.Option, func(), error) { - certBytes, keyBytes, caBytes := secret.Data["certFile"], secret.Data["keyFile"], secret.Data["caFile"] - switch { - case len(certBytes)+len(keyBytes)+len(caBytes) == 0: - return nil, func() {}, nil - case (len(certBytes) > 0 && len(keyBytes) == 0) || (len(keyBytes) > 0 && len(certBytes) == 0): - return nil, nil, fmt.Errorf("invalid '%s' secret data: fields 'certFile' and 'keyFile' require each other's presence", - secret.Name) - } - - // create tmp dir for TLS files - tmp, err := ioutil.TempDir("", "helm-tls-"+secret.Name) - if err != nil { - return nil, nil, err - } - cleanup := func() { os.RemoveAll(tmp) } - - var certFile, keyFile, caFile string - - if len(certBytes) > 0 && len(keyBytes) > 0 { - certFile = filepath.Join(tmp, "cert.crt") - if err := ioutil.WriteFile(certFile, certBytes, 0644); err != nil { - cleanup() - return nil, nil, err - } - keyFile = filepath.Join(tmp, "key.crt") - if err := ioutil.WriteFile(keyFile, keyBytes, 0644); err != nil { - cleanup() - return nil, nil, err - } - } - - if len(caBytes) > 0 { - caFile = filepath.Join(tmp, "ca.pem") - if err := ioutil.WriteFile(caFile, caBytes, 0644); err != nil { - cleanup() - return nil, nil, err - } - } - - return getter.WithTLSClientConfig(certFile, keyFile, caFile), cleanup, nil -} diff --git a/internal/helm/getter/client_opts.go b/internal/helm/getter/client_opts.go new file mode 100644 index 000000000..2dba9a00a --- /dev/null +++ b/internal/helm/getter/client_opts.go @@ -0,0 +1,301 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "os" + "path" + + "github.com/google/go-containerregistry/pkg/authn" + helmgetter "helm.sh/helm/v3/pkg/getter" + helmreg "helm.sh/helm/v3/pkg/registry" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/runtime/secrets" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/helm/registry" + soci "github.com/fluxcd/source-controller/internal/oci" +) + +const ( + certFileName = "cert.pem" + keyFileName = "key.pem" + caFileName = "ca.pem" +) + +var ErrDeprecatedTLSConfig = errors.New("TLS configured in a deprecated manner") + +// ClientOpts contains the various options to use while constructing +// a Helm repository client. +type ClientOpts struct { + Authenticator authn.Authenticator + Keychain authn.Keychain + RegLoginOpts []helmreg.LoginOption + TlsConfig *tls.Config + GetterOpts []helmgetter.Option + Insecure bool +} + +// MustLoginToRegistry returns true if the client options contain at least +// one registry login option. +func (o ClientOpts) MustLoginToRegistry() bool { + return len(o.RegLoginOpts) > 0 && o.RegLoginOpts[0] != nil +} + +// GetClientOpts uses the provided HelmRepository object and a normalized +// URL to construct a HelmClientOpts object. If obj is an OCI HelmRepository, +// then the returned options object will also contain the required registry +// auth mechanisms. 
+// A temporary directory is created to store the certs files if needed and its path is returned along with the options object. It is the +// caller's responsibility to clean up the directory. +func GetClientOpts(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, url string) (*ClientOpts, string, error) { + // This function configures authentication for Helm repositories based on the provided secrets: + // - CertSecretRef: TLS client certificates (always takes priority) + // - SecretRef: Can contain Basic Auth or TLS certificates (deprecated) + // For OCI repositories, additional registry-specific authentication is configured (including Docker config) + opts := &ClientOpts{ + GetterOpts: []helmgetter.Option{ + helmgetter.WithURL(url), + helmgetter.WithTimeout(obj.GetTimeout()), + helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), + }, + Insecure: obj.Spec.Insecure, + } + + // Process secrets and configure authentication + deprecatedTLS, certSecret, authSecret, err := configureAuthentication(ctx, c, obj, opts, url) + if err != nil { + return nil, "", err + } + + // Setup OCI registry specific configurations if needed + var tempCertDir string + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { + tempCertDir, err = configureOCIRegistryWithSecrets(ctx, obj, opts, url, certSecret, authSecret) + if err != nil { + return nil, "", err + } + } + + var deprecatedErr error + if deprecatedTLS { + deprecatedErr = ErrDeprecatedTLSConfig + } + + return opts, tempCertDir, deprecatedErr +} + +// configureAuthentication processes all secret references and sets up authentication. 
+// Returns (deprecatedTLS, certSecret, authSecret, error) where: +// - deprecatedTLS: true if TLS config comes from SecretRef (deprecated pattern) +// - certSecret: the secret from CertSecretRef (nil if not specified) +// - authSecret: the secret from SecretRef (nil if not specified) +func configureAuthentication(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, opts *ClientOpts, url string) (bool, *corev1.Secret, *corev1.Secret, error) { + var deprecatedTLS bool + var certSecret, authSecret *corev1.Secret + + if obj.Spec.CertSecretRef != nil { + secret, err := fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) + if err != nil { + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.CertSecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get TLS authentication secret '%s': %w", secretRef, err) + } + certSecret = secret + + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + tlsConfig, err := secrets.TLSConfigFromSecret(ctx, secret, tlsOpts...) + if err != nil { + return false, nil, nil, fmt.Errorf("failed to construct Helm client's TLS config: %w", err) + } + opts.TlsConfig = tlsConfig + } + + // Extract all authentication methods from SecretRef. + // This secret may contain multiple auth types (Basic Auth, TLS). 
+ if obj.Spec.SecretRef != nil { + secret, err := fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) + if err != nil { + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get authentication secret '%s': %w", secretRef, err) + } + authSecret = secret + + // NOTE: Use WithTLSSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository auth methods work with both system and user-provided CA certificates. + var authOpts = []secrets.AuthMethodsOption{ + secrets.WithTLSSystemCertPool(), + } + methods, err := secrets.AuthMethodsFromSecret(ctx, secret, authOpts...) + if err != nil { + return false, nil, nil, fmt.Errorf("failed to detect authentication methods: %w", err) + } + + if methods.HasBasicAuth() { + opts.GetterOpts = append(opts.GetterOpts, + helmgetter.WithBasicAuth(methods.Basic.Username, methods.Basic.Password)) + } + + // Use TLS from SecretRef only if CertSecretRef is not specified (CertSecretRef takes priority) + if opts.TlsConfig == nil && methods.HasTLS() { + opts.TlsConfig = methods.TLS + deprecatedTLS = true + } + } + + return deprecatedTLS, certSecret, authSecret, nil +} + +// configureOCIRegistryWithSecrets sets up OCI-specific configurations using pre-fetched secrets +func configureOCIRegistryWithSecrets(ctx context.Context, obj *sourcev1.HelmRepository, opts *ClientOpts, url string, certSecret, authSecret *corev1.Secret) (string, error) { + // Configure OCI authentication from authSecret if available + if authSecret != nil { + keychain, err := registry.LoginOptionFromSecret(url, *authSecret) + if err != nil { + return "", fmt.Errorf("failed to configure login options: %w", err) + } + opts.Keychain = keychain + } + + // Handle OCI provider authentication if no SecretRef + if obj.Spec.SecretRef == nil && 
obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider { + authenticator, err := soci.OIDCAuth(ctx, url, obj.Spec.Provider) + if err != nil { + return "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, err) + } + opts.Authenticator = authenticator + } + + // Setup registry login options + loginOpt, err := registry.NewLoginOption(opts.Authenticator, opts.Keychain, url) + if err != nil { + return "", err + } + if loginOpt == nil { + return "", nil + } + opts.RegLoginOpts = []helmreg.LoginOption{loginOpt, helmreg.LoginOptInsecure(obj.Spec.Insecure)} + + // Handle TLS for login options + var tempCertDir string + if opts.TlsConfig != nil { + // Until Helm 3.19 only a file-based login option for TLS is supported. + // In Helm 4 (or in Helm 3.20+ if it ever gets released), a simpler + // in-memory login option for TLS will be available: + // https://github.com/helm/helm/pull/31076 + + tempCertDir, err = os.MkdirTemp("", "helm-repo-oci-certs") + if err != nil { + return "", fmt.Errorf("cannot create temporary directory: %w", err) + } + + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret + } + + certFile, keyFile, caFile, err := storeTLSCertificateFilesForOCI(ctx, tlsSecret, nil, tempCertDir) + if err != nil { + return "", fmt.Errorf("cannot write certs files to path: %w", err) + } + + tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) + if tlsLoginOpt != nil { + opts.RegLoginOpts = append(opts.RegLoginOpts, tlsLoginOpt) + } + } + + return tempCertDir, nil +} + +func fetchSecret(ctx context.Context, c client.Client, name, namespace string) (*corev1.Secret, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + var secret corev1.Secret + if err := c.Get(ctx, key, &secret); err != nil { + return nil, err + } + return &secret, nil +} + +// storeTLSCertificateFilesForOCI writes TLS 
+// certificate data from secrets to files for OCI registry authentication.
+// Helm OCI registry client requires certificate file paths rather than in-memory data,
+// so we need to temporarily write the certificate data to disk.
+// Returns paths to the written cert, key, and CA files (any of which may be empty if not present).
+// NOTE(review): ctx is accepted but currently unused in this function; kept for signature stability.
+func storeTLSCertificateFilesForOCI(ctx context.Context, certSecret, authSecret *corev1.Secret, path string) (string, string, string, error) {
+	var (
+		certFile string
+		keyFile  string
+		caFile   string
+		err      error
+	)
+
+	// Try to get TLS data from certSecret first, then authSecret;
+	// certSecret (from CertSecretRef) always takes priority.
+	var tlsSecret *corev1.Secret
+	if certSecret != nil {
+		tlsSecret = certSecret
+	} else if authSecret != nil {
+		tlsSecret = authSecret
+	}
+
+	if tlsSecret != nil {
+		// A client certificate is only written when both the cert and the
+		// private key are present in the secret.
+		if certData, exists := tlsSecret.Data[secrets.KeyTLSCert]; exists {
+			if keyData, keyExists := tlsSecret.Data[secrets.KeyTLSPrivateKey]; keyExists {
+				certFile, err = writeToFile(certData, certFileName, path)
+				if err != nil {
+					return "", "", "", err
+				}
+				keyFile, err = writeToFile(keyData, keyFileName, path)
+				if err != nil {
+					return "", "", "", err
+				}
+			}
+		}
+
+		// The CA certificate is independent of the client cert/key pair.
+		if caData, exists := tlsSecret.Data[secrets.KeyCACert]; exists {
+			caFile, err = writeToFile(caData, caFileName, path)
+			if err != nil {
+				return "", "", "", err
+			}
+		}
+	}
+
+	return certFile, keyFile, caFile, nil
+}
+
+// writeToFile writes data to tmpDir/filename with owner-only permissions
+// (0600, appropriate for key material) and returns the written file's path.
+func writeToFile(data []byte, filename, tmpDir string) (string, error) {
+	file := path.Join(tmpDir, filename)
+	err := os.WriteFile(file, data, 0o600)
+	if err != nil {
+		return "", err
+	}
+	return file, nil
+}
diff --git a/internal/helm/getter/client_opts_test.go b/internal/helm/getter/client_opts_test.go
new file mode 100644
index 000000000..bf40e7f86
--- /dev/null
+++ b/internal/helm/getter/client_opts_test.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "context" + "os" + "strings" + "testing" + "time" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/google/go-containerregistry/pkg/name" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + helmv1 "github.com/fluxcd/source-controller/api/v1" +) + +func TestGetClientOpts(t *testing.T) { + tlsCA, err := os.ReadFile("../../controller/testdata/certs/ca.pem") + if err != nil { + t.Errorf("could not read CA file: %s", err) + } + + tests := []struct { + name string + certSecret *corev1.Secret + authSecret *corev1.Secret + afterFunc func(t *WithT, hcOpts *ClientOpts) + oci bool + insecure bool + err error + }{ + { + name: "HelmRepository with certSecretRef discards TLS config in secretRef", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.TlsConfig).ToNot(BeNil()) + t.Expect(len(hcOpts.GetterOpts)).To(Equal(4)) + }, + }, + { + name: "HelmRepository with TLS config only in secretRef is marked as deprecated", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-tls", + }, + Data: map[string][]byte{ + 
"username": []byte("user"), + "password": []byte("pass"), + "caFile": tlsCA, + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.TlsConfig).ToNot(BeNil()) + t.Expect(len(hcOpts.GetterOpts)).To(Equal(4)) + }, + err: ErrDeprecatedTLSConfig, + }, + { + name: "OCI HelmRepository with secretRef has auth configured", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + repo, err := name.NewRepository("ghcr.io/dummy") + t.Expect(err).ToNot(HaveOccurred()) + authenticator, err := hcOpts.Keychain.Resolve(repo) + t.Expect(err).ToNot(HaveOccurred()) + config, err := authenticator.Authorization() + t.Expect(err).ToNot(HaveOccurred()) + t.Expect(config.Username).To(Equal("user")) + t.Expect(config.Password).To(Equal("pass")) + t.Expect(hcOpts.Insecure).To(BeFalse()) + }, + oci: true, + }, + { + name: "OCI HelmRepository with insecure repository", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.Insecure).To(BeTrue()) + }, + oci: true, + insecure: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + helmRepo := &helmv1.HelmRepository{ + Spec: helmv1.HelmRepositorySpec{ + Timeout: &metav1.Duration{ + Duration: time.Second, + }, + Insecure: tt.insecure, + }, + } + if tt.oci { + helmRepo.Spec.Type = helmv1.HelmRepositoryTypeOCI + } + + clientBuilder := fakeclient.NewClientBuilder() + if tt.authSecret != nil { + clientBuilder.WithObjects(tt.authSecret.DeepCopy()) + helmRepo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.authSecret.Name, + } + } + if tt.certSecret != nil { + 
clientBuilder.WithObjects(tt.certSecret.DeepCopy()) + helmRepo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + } + c := clientBuilder.Build() + + clientOpts, _, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.err != nil { + g.Expect(err).To(Equal(tt.err)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + tt.afterFunc(g, clientOpts) + }) + } +} + +func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { + tlsCA, err := os.ReadFile("../../controller/testdata/certs/ca.pem") + if err != nil { + t.Errorf("could not read CA file: %s", err) + } + + tests := []struct { + name string + certSecret *corev1.Secret + authSecret *corev1.Secret + loginOptsN int + wantErrMsg string + }{ + { + name: "with valid caFile", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + loginOptsN: 3, + }, + { + name: "without caFile", + certSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{}, + }, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + wantErrMsg: "must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'", + }, + { + name: "without cert secret", + certSecret: nil, + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + loginOptsN: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + helmRepo := &helmv1.HelmRepository{ + Spec: helmv1.HelmRepositorySpec{ + Timeout: &metav1.Duration{ + 
Duration: time.Second, + }, + Type: helmv1.HelmRepositoryTypeOCI, + }, + } + + clientBuilder := fakeclient.NewClientBuilder() + + if tt.authSecret != nil { + clientBuilder.WithObjects(tt.authSecret.DeepCopy()) + helmRepo.Spec.SecretRef = &meta.LocalObjectReference{ + Name: tt.authSecret.Name, + } + } + + if tt.certSecret != nil { + clientBuilder.WithObjects(tt.certSecret.DeepCopy()) + helmRepo.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: tt.certSecret.Name, + } + } + c := clientBuilder.Build() + + clientOpts, tmpDir, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.wantErrMsg != "" { + if err == nil { + t.Errorf("GetClientOpts() expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrMsg) { + t.Errorf("GetClientOpts() expected error containing %q but got %v", tt.wantErrMsg, err) + return + } + return + } + if err != nil { + t.Errorf("GetClientOpts() error = %v", err) + return + } + if tmpDir != "" { + defer os.RemoveAll(tmpDir) + } + if tt.loginOptsN != len(clientOpts.RegLoginOpts) { + // we should have a login option but no TLS option + t.Errorf("expected length of %d for clientOpts.RegLoginOpts but got %d", tt.loginOptsN, len(clientOpts.RegLoginOpts)) + return + } + }) + } +} diff --git a/internal/helm/getter_test.go b/internal/helm/getter_test.go deleted file mode 100644 index bd4e1058c..000000000 --- a/internal/helm/getter_test.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("user"), - "password": []byte("password"), - }, - } - tlsSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "certFile": []byte(`fixture`), - "keyFile": []byte(`fixture`), - "caFile": []byte(`fixture`), - }, - } -) - -func TestClientOptionsFromSecret(t *testing.T) { - tests := []struct { - name string - secrets []corev1.Secret - }{ - {"basic auth", []corev1.Secret{basicAuthSecretFixture}}, - {"TLS", []corev1.Secret{tlsSecretFixture}}, - {"basic auth and TLS", []corev1.Secret{basicAuthSecretFixture, tlsSecretFixture}}, - {"empty", []corev1.Secret{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := corev1.Secret{Data: map[string][]byte{}} - for _, s := range tt.secrets { - for k, v := range s.Data { - secret.Data[k] = v - } - } - got, cleanup, err := ClientOptionsFromSecret(secret) - if cleanup != nil { - defer cleanup() - } - if err != nil { - t.Errorf("ClientOptionsFromSecret() error = %v", err) - return - } - if len(got) != len(tt.secrets) { - t.Errorf("ClientOptionsFromSecret() options = %v, expected = %v", got, len(tt.secrets)) - } - }) - } -} - -func TestBasicAuthFromSecret(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"username and password", basicAuthSecretFixture, nil, false, false}, - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, true, true}, - {"without password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, true, true}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := 
tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - got, err := BasicAuthFromSecret(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("BasicAuthFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("BasicAuthFromSecret() != nil") - return - } - }) - } -} - -func TestTLSClientConfigFromSecret(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"certFile, keyFile and caFile", tlsSecretFixture, nil, false, false}, - {"without certFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "certFile") }, true, true}, - {"without keyFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "keyFile") }, true, true}, - {"without caFile", tlsSecretFixture, func(s *corev1.Secret) { delete(s.Data, "caFile") }, false, false}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - got, cleanup, err := TLSClientConfigFromSecret(*secret) - if cleanup != nil { - defer cleanup() - } - if (err != nil) != tt.wantErr { - t.Errorf("TLSClientConfigFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("TLSClientConfigFromSecret() != nil") - return - } - }) - } -} diff --git a/internal/helm/helm.go b/internal/helm/helm.go new file mode 100644 index 000000000..854a1ab7b --- /dev/null +++ b/internal/helm/helm.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helm
+
+// This list defines a set of global variables used to ensure Helm files loaded
+// into memory during runtime do not exceed defined upper bound limits.
+var (
+	// MaxIndexSize is the max allowed file size in bytes of a ChartRepository
+	// index (50 MiB).
+	MaxIndexSize int64 = 50 << 20
+	// MaxChartSize is the max allowed file size in bytes of a Helm Chart
+	// (10 MiB).
+	MaxChartSize int64 = 10 << 20
+	// MaxChartFileSize is the max allowed file size in bytes of any arbitrary
+	// file originating from a chart (5 MiB).
+	MaxChartFileSize int64 = 5 << 20
+)
diff --git a/internal/helm/registry/auth.go b/internal/helm/registry/auth.go
new file mode 100644
index 000000000..c8b3ca6ae
--- /dev/null
+++ b/internal/helm/registry/auth.go
@@ -0,0 +1,152 @@
+/*
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+
+	"github.com/docker/cli/cli/config"
+	"github.com/docker/cli/cli/config/credentials"
+	"github.com/fluxcd/source-controller/internal/helm/common"
+	"github.com/fluxcd/source-controller/internal/oci"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"helm.sh/helm/v3/pkg/registry"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// helper is a subset of the Docker credential helper credentials.Helper interface used by NewKeychainFromHelper.
+type helper struct {
+	registry           string
+	username, password string
+	err                error
+}
+
+// Get returns the stored credentials, but only for the registry host the
+// helper was configured with.
+func (h helper) Get(serverURL string) (string, string, error) {
+	if serverURL != h.registry {
+		return "", "", fmt.Errorf("unexpected serverURL: %s", serverURL)
+	}
+	return h.username, h.password, h.err
+}
+
+// LoginOptionFromSecret derives authentication data from a Secret to login to an OCI registry. This Secret
+// may either hold "username" and "password" fields or be of the corev1.SecretTypeDockerConfigJson type and hold
+// a corev1.DockerConfigJsonKey field with a complete Docker configuration. If both "username" and "password" are
+// empty, an anonymous keychain (oci.Anonymous) and a nil error are returned; if only one of the two is set, an
+// error is returned. Note that, despite its name, the function returns an authn.Keychain, not a LoginOption.
+func LoginOptionFromSecret(registryURL string, secret corev1.Secret) (authn.Keychain, error) {
+	var username, password string
+	parsedURL, err := url.Parse(registryURL)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse registry URL '%s' while reconciling Secret '%s': %w",
+			registryURL, secret.Name, err)
+	}
+	if secret.Type == corev1.SecretTypeDockerConfigJson {
+		dockerCfg, err := config.LoadFromReader(bytes.NewReader(secret.Data[corev1.DockerConfigJsonKey]))
+		if err != nil {
+			return nil, fmt.Errorf("unable to load Docker config from Secret '%s': %w", secret.Name, err)
+		}
+		authConfig, err := dockerCfg.GetAuthConfig(parsedURL.Host)
+		if err != nil {
+			return nil, fmt.Errorf("unable to get authentication data from Secret '%s': %w", secret.Name, err)
+		}
+
+		// Make sure that the obtained auth config is for the requested host.
+		// When the docker config does not contain the credentials for a host,
+		// the credential store returns an empty auth config.
+		// Refer: https://github.com/docker/cli/blob/v20.10.16/cli/config/credentials/file_store.go#L44
+		if credentials.ConvertToHostname(authConfig.ServerAddress) != parsedURL.Host {
+			return nil, fmt.Errorf("no auth config for '%s' in the docker-registry Secret '%s'", parsedURL.Host, secret.Name)
+		}
+		username = authConfig.Username
+		password = authConfig.Password
+	} else {
+		username, password = string(secret.Data["username"]), string(secret.Data["password"])
+	}
+	switch {
+	case username == "" && password == "":
+		return oci.Anonymous{}, nil
+	case username == "" || password == "":
+		return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name)
+	}
+	return authn.NewKeychainFromHelper(helper{registry: parsedURL.Host, username: username, password: password}), nil
+}
+
+// KeychainAdaptHelper returns an ORAS credentials callback configured with the authorization data
+// from the given authn keychain.
+// This allows for example to make use of credential helpers from
+// cloud providers.
+// Ref: https://github.com/google/go-containerregistry/tree/main/pkg/authn
+func KeychainAdaptHelper(keyChain authn.Keychain) func(string) (registry.LoginOption, error) {
+	return func(registryURL string) (registry.LoginOption, error) {
+		parsedURL, err := url.Parse(registryURL)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse registry URL '%s'", registryURL)
+		}
+		// Resolve credentials for the registry host only.
+		authenticator, err := keyChain.Resolve(common.StringResource{Registry: parsedURL.Host})
+		if err != nil {
+			return nil, fmt.Errorf("unable to resolve credentials for registry '%s': %w", registryURL, err)
+		}

+		return AuthAdaptHelper(authenticator)
+	}
+}
+
+// AuthAdaptHelper returns an ORAS basic-auth registry login option configured with the
+// authorization data from the given authn authenticator. This allows for example to make
+// use of credential helpers from cloud providers.
+// Ref: https://github.com/google/go-containerregistry/tree/main/pkg/authn
+func AuthAdaptHelper(auth authn.Authenticator) (registry.LoginOption, error) {
+	authConfig, err := auth.Authorization()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get authentication data from OIDC: %w", err)
+	}
+
+	username := authConfig.Username
+	password := authConfig.Password
+
+	switch {
+	case username == "" && password == "":
+		return nil, nil
+	case username == "" || password == "":
+		return nil, fmt.Errorf("invalid auth data: required fields 'username' and 'password'")
+	}
+	return registry.LoginOptBasicAuth(username, password), nil
+}
+
+// NewLoginOption returns a registry login option derived from the given authenticator
+// (which takes priority) or keychain, for the given registry URL. If neither an
+// authenticator nor a keychain is provided, a nil login option and a nil error are returned.
+func NewLoginOption(auth authn.Authenticator, keychain authn.Keychain, registryURL string) (registry.LoginOption, error) {
+	if auth != nil {
+		return AuthAdaptHelper(auth)
+	}
+
+	if keychain != nil {
+		return KeychainAdaptHelper(keychain)(registryURL)
+	}
+
+	return nil, nil
+}
+
+// TLSLoginOption returns a LoginOption that can be used to configure the TLS client.
+// It requires either the caFile or both certFile and keyFile to be not blank.
+func TLSLoginOption(certFile, keyFile, caFile string) registry.LoginOption {
+	if (certFile != "" && keyFile != "") || caFile != "" {
+		return registry.LoginOptTLSClientConfig(certFile, keyFile, caFile)
+	}
+
+	return nil
+}
diff --git a/internal/helm/registry/auth_test.go b/internal/helm/registry/auth_test.go
new file mode 100644
index 000000000..14942a5bb
--- /dev/null
+++ b/internal/helm/registry/auth_test.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"net/url"
+	"testing"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	.
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" +) + +const repoURL = "https://example.com" + +func TestLoginOptionFromSecret(t *testing.T) { + testURL := "oci://registry.example.com/foo/bar" + testUser := "flux" + testPassword := "somepassword" + testDockerconfigjson := `{"auths":{"registry.example.com":{"username":"flux","password":"somepassword","auth":"Zmx1eDpzb21lcGFzc3dvcmQ="}}}` + testDockerconfigjsonHTTPS := `{"auths":{"https://registry.example.com":{"username":"flux","password":"somepassword","auth":"Zmx1eDpzb21lcGFzc3dvcmQ="}}}` + dockerconfigjsonKey := ".dockerconfigjson" + + tests := []struct { + name string + url string + secretType corev1.SecretType + secretData map[string][]byte + wantErr bool + }{ + { + name: "generic secret", + url: testURL, + secretType: corev1.SecretTypeOpaque, + secretData: map[string][]byte{ + "username": []byte(testUser), + "password": []byte(testPassword), + }, + }, + { + name: "generic secret without username", + url: testURL, + secretType: corev1.SecretTypeOpaque, + secretData: map[string][]byte{ + "password": []byte(testPassword), + }, + wantErr: true, + }, + { + name: "generic secret without password", + url: testURL, + secretType: corev1.SecretTypeOpaque, + secretData: map[string][]byte{ + "username": []byte(testUser), + }, + wantErr: true, + }, + { + name: "generic secret without username and password", + url: testURL, + secretType: corev1.SecretTypeOpaque, + }, + { + name: "docker-registry secret", + url: testURL, + secretType: corev1.SecretTypeDockerConfigJson, + secretData: map[string][]byte{ + dockerconfigjsonKey: []byte(testDockerconfigjson), + }, + }, + { + name: "docker-registry secret host mismatch", + url: "oci://registry.gitlab.com", + secretType: corev1.SecretTypeDockerConfigJson, + secretData: map[string][]byte{ + dockerconfigjsonKey: []byte(testDockerconfigjson), + }, + wantErr: true, + }, + { + name: "docker-registry secret invalid host", + url: "oci://registry .gitlab.com", + 
secretType: corev1.SecretTypeDockerConfigJson, + secretData: map[string][]byte{ + dockerconfigjsonKey: []byte(testDockerconfigjson), + }, + wantErr: true, + }, + { + name: "docker-registry secret invalid docker config", + url: testURL, + secretType: corev1.SecretTypeDockerConfigJson, + secretData: map[string][]byte{ + dockerconfigjsonKey: []byte("foo"), + }, + wantErr: true, + }, + { + name: "docker-registry secret with URL scheme", + url: testURL, + secretType: corev1.SecretTypeDockerConfigJson, + secretData: map[string][]byte{ + dockerconfigjsonKey: []byte(testDockerconfigjsonHTTPS), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + secret := corev1.Secret{} + secret.Name = "test-secret" + secret.Data = tt.secretData + secret.Type = tt.secretType + + _, err := LoginOptionFromSecret(tt.url, secret) + g.Expect(err != nil).To(Equal(tt.wantErr)) + }) + } +} + +func TestKeychainAdaptHelper(t *testing.T) { + g := NewWithT(t) + reg, err := url.Parse(repoURL) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + auth := helper{ + username: "flux", + password: "flux_password", + registry: reg.Host, + } + + tests := []struct { + name string + auth authn.Keychain + expectedLogin bool + wantErr bool + }{ + { + name: "Login from basic auth with empty auth", + auth: authn.NewKeychainFromHelper(helper{}), + expectedLogin: false, + wantErr: false, + }, + { + name: "Login from basic auth", + auth: authn.NewKeychainFromHelper(auth), + expectedLogin: true, + wantErr: false, + }, + { + name: "Login with missing password", + auth: authn.NewKeychainFromHelper(helper{username: "flux", registry: reg.Host}), + expectedLogin: false, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + loginOpt, err := KeychainAdaptHelper(tt.auth)(repoURL) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + + if tt.expectedLogin { + 
g.Expect(loginOpt).ToNot(BeNil()) + } else { + g.Expect(loginOpt).To(BeNil()) + } + }) + } +} diff --git a/internal/helm/registry/client.go b/internal/helm/registry/client.go new file mode 100644 index 000000000..5b89ea12e --- /dev/null +++ b/internal/helm/registry/client.go @@ -0,0 +1,83 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "crypto/tls" + "io" + "net/http" + "os" + + "helm.sh/helm/v3/pkg/registry" + "k8s.io/apimachinery/pkg/util/errors" +) + +// ClientGenerator generates a registry client and a temporary credential file. +// The client is meant to be used for a single reconciliation. +// The file is meant to be used for a single reconciliation and deleted after. +func ClientGenerator(tlsConfig *tls.Config, isLogin, insecureHTTP bool) (*registry.Client, string, error) { + if isLogin { + // create a temporary file to store the credentials + // this is needed because otherwise the credentials are stored in ~/.docker/config.json. 
+ credentialsFile, err := os.CreateTemp("", "credentials") + if err != nil { + return nil, "", err + } + + var errs []error + rClient, err := newClient(credentialsFile.Name(), tlsConfig, insecureHTTP) + if err != nil { + errs = append(errs, err) + // attempt to delete the temporary file + if credentialsFile != nil { + err := os.Remove(credentialsFile.Name()) + if err != nil { + errs = append(errs, err) + } + } + return nil, "", errors.NewAggregate(errs) + } + return rClient, credentialsFile.Name(), nil + } + + rClient, err := newClient("", tlsConfig, insecureHTTP) + if err != nil { + return nil, "", err + } + return rClient, "", nil +} + +func newClient(credentialsFile string, tlsConfig *tls.Config, insecureHTTP bool) (*registry.Client, error) { + opts := []registry.ClientOption{ + registry.ClientOptWriter(io.Discard), + } + if insecureHTTP { + opts = append(opts, registry.ClientOptPlainHTTP()) + } + if tlsConfig != nil { + t := http.DefaultTransport.(*http.Transport).Clone() + t.TLSClientConfig = tlsConfig + opts = append(opts, registry.ClientOptHTTPClient(&http.Client{ + Transport: t, + })) + } + if credentialsFile != "" { + opts = append(opts, registry.ClientOptCredentialsFile(credentialsFile)) + } + + return registry.NewClient(opts...) +} diff --git a/internal/helm/repository.go b/internal/helm/repository.go deleted file mode 100644 index ee9453791..000000000 --- a/internal/helm/repository.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/url" - "path" - "sort" - "strings" - - "github.com/Masterminds/semver/v3" - "helm.sh/helm/v3/pkg/getter" - "helm.sh/helm/v3/pkg/repo" - "sigs.k8s.io/yaml" - - "github.com/fluxcd/pkg/version" -) - -// ChartRepository represents a Helm chart repository, and the configuration -// required to download the chart index, and charts from the repository. -type ChartRepository struct { - URL string - Index *repo.IndexFile - Client getter.Getter - Options []getter.Option -} - -// NewChartRepository constructs and returns a new ChartRepository with -// the ChartRepository.Client configured to the getter.Getter for the -// repository URL scheme. It returns an error on URL parsing failures, -// or if there is no getter available for the scheme. -func NewChartRepository(repositoryURL string, providers getter.Providers, opts []getter.Option) (*ChartRepository, error) { - u, err := url.Parse(repositoryURL) - if err != nil { - return nil, err - } - c, err := providers.ByScheme(u.Scheme) - if err != nil { - return nil, err - } - return &ChartRepository{ - URL: repositoryURL, - Client: c, - Options: opts, - }, nil -} - -// Get returns the repo.ChartVersion for the given name, the version is expected -// to be a semver.Constraints compatible string. If version is empty, the latest -// stable version will be returned and prerelease versions will be ignored. 
-func (r *ChartRepository) Get(name, ver string) (*repo.ChartVersion, error) { - cvs, ok := r.Index.Entries[name] - if !ok { - return nil, repo.ErrNoChartName - } - if len(cvs) == 0 { - return nil, repo.ErrNoChartVersion - } - - // Check for exact matches first - if len(ver) != 0 { - for _, cv := range cvs { - if ver == cv.Version { - return cv, nil - } - } - } - - // Continue to look for a (semantic) version match - verConstraint, err := semver.NewConstraint("*") - if err != nil { - return nil, err - } - latestStable := len(ver) == 0 || ver == "*" - if !latestStable { - verConstraint, err = semver.NewConstraint(ver) - if err != nil { - return nil, err - } - } - - // Filter out chart versions that doesn't satisfy constraints if any, - // parse semver and build a lookup table - var matchedVersions semver.Collection - lookup := make(map[*semver.Version]*repo.ChartVersion) - for _, cv := range cvs { - v, err := version.ParseVersion(cv.Version) - if err != nil { - continue - } - - if !verConstraint.Check(v) { - continue - } - - matchedVersions = append(matchedVersions, v) - lookup[v] = cv - } - if len(matchedVersions) == 0 { - return nil, fmt.Errorf("no chart version found for %s-%s", name, ver) - } - - // Sort versions - sort.SliceStable(matchedVersions, func(i, j int) bool { - // Reverse - return !(func() bool { - left := matchedVersions[i] - right := matchedVersions[j] - - if !left.Equal(right) { - return left.LessThan(right) - } - - // Having chart creation timestamp at our disposal, we put package with the - // same version into a chronological order. 
This is especially important for - // versions that differ only by build metadata, because it is not considered - // a part of the comparable version in Semver - return lookup[left].Created.Before(lookup[right].Created) - })() - }) - - latest := matchedVersions[0] - return lookup[latest], nil -} - -// DownloadChart confirms the given repo.ChartVersion has a downloadable URL, -// and then attempts to download the chart using the Client and Options of the -// ChartRepository. It returns a bytes.Buffer containing the chart data. -func (r *ChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) { - if len(chart.URLs) == 0 { - return nil, fmt.Errorf("chart %q has no downloadable URLs", chart.Name) - } - - // TODO(hidde): according to the Helm source the first item is not - // always the correct one to pick, check for updates once in awhile. - // Ref: https://github.com/helm/helm/blob/v3.3.0/pkg/downloader/chart_downloader.go#L241 - ref := chart.URLs[0] - u, err := url.Parse(ref) - if err != nil { - err = fmt.Errorf("invalid chart URL format '%s': %w", ref, err) - return nil, err - } - - // Prepend the chart repository base URL if the URL is relative - if !u.IsAbs() { - repoURL, err := url.Parse(r.URL) - if err != nil { - err = fmt.Errorf("invalid chart repository URL format '%s': %w", r.URL, err) - return nil, err - } - q := repoURL.Query() - // Trailing slash is required for ResolveReference to work - repoURL.Path = strings.TrimSuffix(repoURL.Path, "/") + "/" - u = repoURL.ResolveReference(u) - u.RawQuery = q.Encode() - } - - return r.Client.Get(u.String(), r.Options...) -} - -// LoadIndex loads the given bytes into the Index while performing -// minimal validity checks. It fails if the API version is not set -// (repo.ErrNoAPIVersion), or if the unmarshal fails. 
-// -// The logic is derived from and on par with: -// https://github.com/helm/helm/blob/v3.3.4/pkg/repo/index.go#L301 -func (r *ChartRepository) LoadIndex(b []byte) error { - i := &repo.IndexFile{} - if err := yaml.UnmarshalStrict(b, i); err != nil { - return err - } - if i.APIVersion == "" { - return repo.ErrNoAPIVersion - } - i.SortEntries() - r.Index = i - return nil -} - -// DownloadIndex attempts to download the chart repository index using -// the Client and set Options, and loads the index file into the Index. -// It returns an error on URL parsing and Client failures. -func (r *ChartRepository) DownloadIndex() error { - u, err := url.Parse(r.URL) - if err != nil { - return err - } - u.RawPath = path.Join(u.RawPath, "index.yaml") - u.Path = path.Join(u.Path, "index.yaml") - - res, err := r.Client.Get(u.String(), r.Options...) - if err != nil { - return err - } - b, err := ioutil.ReadAll(res) - if err != nil { - return err - } - - return r.LoadIndex(b) -} diff --git a/internal/helm/repository/chart_repository.go b/internal/helm/repository/chart_repository.go new file mode 100644 index 000000000..e8030ec7b --- /dev/null +++ b/internal/helm/repository/chart_repository.go @@ -0,0 +1,532 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package repository + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path" + "sort" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" + "github.com/opencontainers/go-digest" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/repo" + "sigs.k8s.io/yaml" + + "github.com/fluxcd/pkg/version" + + "github.com/fluxcd/pkg/http/transport" + "github.com/fluxcd/source-controller/internal/helm" + "github.com/fluxcd/source-controller/internal/oci" +) + +var ( + ErrNoChartIndex = errors.New("no chart index") +) + +// IndexFromFile loads a repo.IndexFile from the given path. It returns an +// error if the file does not exist, is not a regular file, exceeds the +// maximum index file size, or if the file cannot be parsed. +func IndexFromFile(path string) (*repo.IndexFile, error) { + st, err := os.Lstat(path) + if err != nil { + return nil, err + } + if !st.Mode().IsRegular() { + return nil, fmt.Errorf("%s is not a regular file", path) + } + if st.Size() > helm.MaxIndexSize { + return nil, fmt.Errorf("%s exceeds the maximum index file size of %d bytes", path, helm.MaxIndexSize) + } + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return IndexFromBytes(b) +} + +// IndexFromBytes loads a repo.IndexFile from the given bytes. It returns an +// error if the bytes cannot be parsed, or if the API version is not set. +// The entries are sorted before the index is returned. 
+func IndexFromBytes(b []byte) (*repo.IndexFile, error) { + if len(b) == 0 { + return nil, repo.ErrEmptyIndexYaml + } + + i := &repo.IndexFile{} + if err := jsonOrYamlUnmarshal(b, i); err != nil { + return nil, err + } + + if i.APIVersion == "" { + return nil, repo.ErrNoAPIVersion + } + + for name, cvs := range i.Entries { + for idx := len(cvs) - 1; idx >= 0; idx-- { + if cvs[idx] == nil { + continue + } + // When metadata section missing, initialize with no data + if cvs[idx].Metadata == nil { + cvs[idx].Metadata = &chart.Metadata{} + } + if cvs[idx].APIVersion == "" { + cvs[idx].APIVersion = chart.APIVersionV1 + } + if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil { + cvs = append(cvs[:idx], cvs[idx+1:]...) + } + } + // adjust slice to only contain a set of valid versions + i.Entries[name] = cvs + } + + i.SortEntries() + return i, nil +} + +// ChartRepository represents a Helm chart repository, and the configuration +// required to download the chart index and charts from the repository. +// All methods are thread safe unless defined otherwise. +type ChartRepository struct { + // URL the ChartRepository's index.yaml can be found at, + // without the index.yaml suffix. + URL string + // Path is the absolute path to the Index file. + Path string + // Index of the ChartRepository. + Index *repo.IndexFile + + // Client to use while downloading the Index or a chart from the URL. + Client getter.Getter + // Options to configure the Client with while downloading the Index + // or a chart from the URL. + Options []getter.Option + + tlsConfig *tls.Config + + cached bool + digests map[digest.Algorithm]digest.Digest + + *sync.RWMutex +} + +// NewChartRepository constructs and returns a new ChartRepository with +// the ChartRepository.Client configured to the getter.Getter for the +// repository URL scheme. It returns an error on URL parsing failures, +// or if there is no getter available for the scheme. 
+func NewChartRepository(URL, path string, providers getter.Providers, tlsConfig *tls.Config, getterOpts ...getter.Option) (*ChartRepository, error) { + u, err := url.Parse(URL) + if err != nil { + return nil, err + } + c, err := providers.ByScheme(u.Scheme) + if err != nil { + return nil, err + } + + r := newChartRepository() + r.URL = URL + r.Path = path + r.Client = c + r.Options = getterOpts + r.tlsConfig = tlsConfig + + return r, nil +} + +func newChartRepository() *ChartRepository { + return &ChartRepository{ + digests: make(map[digest.Algorithm]digest.Digest, 0), + RWMutex: &sync.RWMutex{}, + } +} + +// GetChartVersion returns the repo.ChartVersion for the given name, the version is expected +// to be a semver.Constraints compatible string. If version is empty, the latest +// stable version will be returned and prerelease versions will be ignored. +func (r *ChartRepository) GetChartVersion(name, ver string) (*repo.ChartVersion, error) { + // See if we already have the index in cache or try to load it. 
+ if err := r.StrategicallyLoadIndex(); err != nil { + return nil, &ErrExternal{Err: err} + } + + cv, err := r.getChartVersion(name, ver) + if err != nil { + return nil, &ErrReference{Err: err} + } + return cv, nil +} + +func (r *ChartRepository) getChartVersion(name, ver string) (*repo.ChartVersion, error) { + r.RLock() + defer r.RUnlock() + + if r.Index == nil { + return nil, ErrNoChartIndex + } + cvs, ok := r.Index.Entries[name] + if !ok { + return nil, repo.ErrNoChartName + } + if len(cvs) == 0 { + return nil, repo.ErrNoChartVersion + } + + // Check for exact matches first + if len(ver) != 0 { + for _, cv := range cvs { + if ver == cv.Version { + return cv, nil + } + } + } + + // Continue to look for a (semantic) version match + verConstraint, err := semver.NewConstraint("*") + if err != nil { + return nil, err + } + latestStable := len(ver) == 0 || ver == "*" + if !latestStable { + verConstraint, err = semver.NewConstraint(ver) + if err != nil { + return nil, err + } + } + + // Filter out chart versions that don't satisfy constraints if any, + // parse semver and build a lookup table + var matchedVersions semver.Collection + lookup := make(map[*semver.Version]*repo.ChartVersion, 0) + for _, cv := range cvs { + v, err := version.ParseVersion(cv.Version) + if err != nil { + continue + } + + if !verConstraint.Check(v) { + continue + } + + matchedVersions = append(matchedVersions, v) + lookup[v] = cv + } + if len(matchedVersions) == 0 { + return nil, fmt.Errorf("no '%s' chart with version matching '%s' found", name, ver) + } + + // Sort versions + sort.SliceStable(matchedVersions, func(i, j int) bool { + // Reverse + return !(func() bool { + left := matchedVersions[i] + right := matchedVersions[j] + + if !left.Equal(right) { + return left.LessThan(right) + } + + // Having chart creation timestamp at our disposal, we put package with the + // same version into a chronological order. 
This is especially important for + // versions that differ only by build metadata, because it is not considered + // a part of the comparable version in Semver + return lookup[left].Created.Before(lookup[right].Created) + })() + }) + + latest := matchedVersions[0] + return lookup[latest], nil +} + +// DownloadChart confirms the given repo.ChartVersion has a downloadable URL, +// and then attempts to download the chart using the Client and Options of the +// ChartRepository. It returns a bytes.Buffer containing the chart data. +func (r *ChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) { + if len(chart.URLs) == 0 { + return nil, fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) + } + + // TODO(hidde): according to the Helm source the first item is not + // always the correct one to pick, check for updates once in a while. + // Ref: https://github.com/helm/helm/blob/v3.3.0/pkg/downloader/chart_downloader.go#L241 + ref := chart.URLs[0] + resolvedUrl, err := repo.ResolveReferenceURL(r.URL, ref) + if err != nil { + return nil, err + } + + t := transport.NewOrIdle(r.tlsConfig) + clientOpts := append(r.Options, getter.WithTransport(t)) + defer transport.Release(t) + + return r.Client.Get(resolvedUrl, clientOpts...) +} + +// CacheIndex attempts to write the index from the remote into a new temporary file +// using DownloadIndex, and sets Path and cached. +// The caller is expected to handle the garbage collection of Path, and to +// load the Index separately using LoadFromPath if required. 
+func (r *ChartRepository) CacheIndex() error { + f, err := os.CreateTemp("", "chart-index-*.yaml") + if err != nil { + return fmt.Errorf("failed to create temp file to cache index to: %w", err) + } + + if err = r.DownloadIndex(f, helm.MaxIndexSize); err != nil { + f.Close() + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } + return fmt.Errorf("failed to cache index to temporary file: %w", err) + } + + if err = f.Close(); err != nil { + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } + return fmt.Errorf("failed to close cached index file '%s': %w", f.Name(), err) + } + + r.Lock() + r.Path = f.Name() + r.Index = nil + r.cached = true + r.invalidate() + r.Unlock() + + return nil +} + +// StrategicallyLoadIndex lazy-loads the Index if required, first +// attempting to load it from Path if the file exists, before falling +// back to caching it. +func (r *ChartRepository) StrategicallyLoadIndex() (err error) { + if r.HasIndex() { + return + } + + if !r.HasFile() { + if err = r.CacheIndex(); err != nil { + err = fmt.Errorf("failed to cache index: %w", err) + return + } + } + + if err = r.LoadFromPath(); err != nil { + err = fmt.Errorf("failed to load index: %w", err) + return + } + return +} + +// LoadFromPath attempts to load the Index from the configured Path. +// It returns an error if no Path is set, or if the load failed. +func (r *ChartRepository) LoadFromPath() error { + r.Lock() + defer r.Unlock() + + if len(r.Path) == 0 { + return fmt.Errorf("no cache path") + } + + i, err := IndexFromFile(r.Path) + if err != nil { + return fmt.Errorf("failed to load index: %w", err) + } + + r.Index = i + return nil +} + +// DownloadIndex attempts to download the chart repository index using +// the Client and set Options, and writes the index to the given io.Writer. 
+// Upon download, the index is copied to the writer if the index size +// does not exceed the maximum index file size. Otherwise, it returns an error. +// A url.Error is returned if the URL failed to parse. +func (r *ChartRepository) DownloadIndex(w io.Writer, maxSize int64) (err error) { + r.RLock() + defer r.RUnlock() + + u, err := url.Parse(r.URL) + if err != nil { + return err + } + u.RawPath = path.Join(u.RawPath, "index.yaml") + u.Path = path.Join(u.Path, "index.yaml") + + t := transport.NewOrIdle(r.tlsConfig) + clientOpts := append(r.Options, getter.WithTransport(t)) + defer transport.Release(t) + + var res *bytes.Buffer + res, err = r.Client.Get(u.String(), clientOpts...) + if err != nil { + return err + } + + if int64(res.Len()) > maxSize { + return fmt.Errorf("index exceeds the maximum index file size of %d bytes", maxSize) + } + + if _, err = io.Copy(w, res); err != nil { + return err + } + return nil +} + +// Digest returns the digest of the file at the ChartRepository's Path. +func (r *ChartRepository) Digest(algorithm digest.Algorithm) digest.Digest { + if !r.HasFile() { + return "" + } + + r.Lock() + defer r.Unlock() + + if _, ok := r.digests[algorithm]; !ok { + if f, err := os.Open(r.Path); err == nil { + defer f.Close() + rd := io.LimitReader(f, helm.MaxIndexSize) + if d, err := algorithm.FromReader(rd); err == nil { + r.digests[algorithm] = d + } + } + } + return r.digests[algorithm] +} + +// ToJSON returns the index formatted as JSON. +func (r *ChartRepository) ToJSON() ([]byte, error) { + if !r.HasIndex() { + return nil, fmt.Errorf("index not loaded yet") + } + + return json.MarshalIndent(r.Index, "", " ") +} + +// HasIndex returns true if the Index is not nil. +func (r *ChartRepository) HasIndex() bool { + r.RLock() + defer r.RUnlock() + + return r.Index != nil +} + +// HasFile returns true if Path exists and is a regular file. 
+func (r *ChartRepository) HasFile() bool { + r.RLock() + defer r.RUnlock() + + if r.Path != "" { + if stat, err := os.Lstat(r.Path); err == nil { + return stat.Mode().IsRegular() + } + } + return false +} + +// Clear clears the Index and removes the file at Path, if cached. +func (r *ChartRepository) Clear() error { + r.Lock() + defer r.Unlock() + + r.Index = nil + + if r.cached { + if err := os.Remove(r.Path); err != nil { + return fmt.Errorf("failed to remove cached index: %w", err) + } + r.Path = "" + r.cached = false + } + + r.invalidate() + return nil +} + +// Invalidate clears any cached digests. +func (r *ChartRepository) Invalidate() { + r.Lock() + defer r.Unlock() + + r.invalidate() +} + +func (r *ChartRepository) invalidate() { + r.digests = make(map[digest.Algorithm]digest.Digest, 0) +} + +// VerifyChart verifies the chart against a signature. +// It returns an error on failure. +func (r *ChartRepository) VerifyChart(_ context.Context, _ *repo.ChartVersion) (oci.VerificationResult, error) { + // this is a no-op because this is not implemented yet. + return oci.VerificationResultIgnored, fmt.Errorf("not implemented") +} + +// jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML +// into the provided interface. +// +// It automatically detects whether the data is in JSON or YAML format by +// checking its validity as JSON. If the data is valid JSON, it will use the +// `encoding/json` package to unmarshal it. Otherwise, it will use the +// `sigs.k8s.io/yaml` package to unmarshal the YAML data. +// +// Can potentially be replaced when Helm PR for JSON support has been merged. 
+ xref: https://github.com/helm/helm/pull/12245 +func jsonOrYamlUnmarshal(b []byte, i interface{}) error { + if json.Valid(b) { + return json.Unmarshal(b, i) + } + return yaml.UnmarshalStrict(b, i) +} + +// ignoreSkippableChartValidationError inspects the given error and returns nil if +// the error isn't important for index loading +// +// In particular, charts may introduce validations that don't impact repository indexes +// And repository indexes may be generated by older/non-compliant software, which doesn't +// conform to all validations. +// +// this code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index.go#L402 +func ignoreSkippableChartValidationError(err error) error { + verr, ok := err.(chart.ValidationError) + if !ok { + return err + } + + // https://github.com/helm/helm/issues/12748 (JFrog repository strips alias field from index) + if strings.HasPrefix(verr.Error(), "validation: more than one dependency with name or alias") { + return nil + } + + return err +} diff --git a/internal/helm/repository/chart_repository_test.go b/internal/helm/repository/chart_repository_test.go new file mode 100644 index 000000000..1b2f1c0fb --- /dev/null +++ b/internal/helm/repository/chart_repository_test.go @@ -0,0 +1,883 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package repository + +import ( + "bytes" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "sync" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/opencontainers/go-digest" + "helm.sh/helm/v3/pkg/chart" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/helm" +) + +var now = time.Now() + +const ( + testFile = "../testdata/local-index.yaml" + chartmuseumTestFile = "../testdata/chartmuseum-index.yaml" + chartmuseumJSONTestFile = "../testdata/chartmuseum-index.json" + unorderedTestFile = "../testdata/local-index-unordered.yaml" +) + +// mockGetter is a simple mocking getter.Getter implementation, returning +// a byte response to any provided URL. +type mockGetter struct { + Response []byte + LastCalledURL string +} + +func (g *mockGetter) Get(u string, _ ...helmgetter.Option) (*bytes.Buffer, error) { + r := g.Response + g.LastCalledURL = u + return bytes.NewBuffer(r), nil +} + +// Index load tests are derived from https://github.com/helm/helm/blob/v3.3.4/pkg/repo/index_test.go#L108 +// to ensure parity with Helm behaviour. +func TestIndexFromFile(t *testing.T) { + g := NewWithT(t) + + // Create an index file that exceeds the max index size. 
+ tmpDir := t.TempDir() + bigIndexFile := filepath.Join(tmpDir, "index.yaml") + data := make([]byte, helm.MaxIndexSize+10) + g.Expect(os.WriteFile(bigIndexFile, data, 0o640)).ToNot(HaveOccurred()) + + tests := []struct { + name string + filename string + wantErr string + }{ + { + name: "regular index file", + filename: testFile, + }, + { + name: "chartmuseum index file", + filename: chartmuseumTestFile, + }, + { + name: "chartmuseum json index file", + filename: chartmuseumJSONTestFile, + }, + { + name: "error if index size exceeds max size", + filename: bigIndexFile, + wantErr: "exceeds the maximum index file size", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + i, err := IndexFromFile(tt.filename) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + + verifyLocalIndex(t, i) + }) + } +} + +func TestIndexFromBytes(t *testing.T) { + tests := []struct { + name string + b []byte + wantName string + wantVersion string + wantDigest string + wantErr string + }{ + { + name: "index", + b: []byte(` +apiVersion: v1 +entries: + nginx: + - urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" +`), + wantName: "nginx", + wantVersion: "0.2.0", + wantDigest: "sha256:1234567890abcdef", + }, + { + name: "index without API version", + b: []byte(`entries: + nginx: + - name: nginx`), + wantErr: "no API version specified", + }, + { + name: "index with duplicate entry", + b: []byte(`apiVersion: v1 +entries: + nginx: + - name: nginx" + nginx: + - name: nginx`), + wantErr: "key \"nginx\" already set in map", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + + i, err := 
IndexFromBytes(tt.b) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(i).To(BeNil()) + return + } + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(i).ToNot(BeNil()) + got, err := i.Get(tt.wantName, tt.wantVersion) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got.Digest).To(Equal(tt.wantDigest)) + }) + } +} + +func TestIndexFromBytes_Unordered(t *testing.T) { + b, err := os.ReadFile(unorderedTestFile) + if err != nil { + t.Fatal(err) + } + i, err := IndexFromBytes(b) + if err != nil { + t.Fatal(err) + } + verifyLocalIndex(t, i) +} + +func TestNewChartRepository(t *testing.T) { + repositoryURL := "https://example.com" + providers := helmgetter.Providers{ + helmgetter.Provider{ + Schemes: []string{"https"}, + New: helmgetter.NewHTTPGetter, + }, + } + options := []helmgetter.Option{helmgetter.WithBasicAuth("username", "password")} + + t.Run("should construct chart repository", func(t *testing.T) { + g := NewWithT(t) + + r, err := NewChartRepository(repositoryURL, "", providers, nil, options...) 
+ g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r).ToNot(BeNil()) + g.Expect(r.URL).To(Equal(repositoryURL)) + g.Expect(r.Client).ToNot(BeNil()) + g.Expect(r.Options).To(Equal(options)) + }) + + t.Run("should error on URL parsing failure", func(t *testing.T) { + g := NewWithT(t) + r, err := NewChartRepository("https://ex ample.com", "", nil, nil, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(BeAssignableToTypeOf(&url.Error{})) + g.Expect(r).To(BeNil()) + + }) + + t.Run("should error on unsupported scheme", func(t *testing.T) { + g := NewWithT(t) + + r, err := NewChartRepository("http://example.com", "", providers, nil, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("scheme \"http\" not supported")) + g.Expect(r).To(BeNil()) + }) +} + +func TestChartRepository_GetChartVersion(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Index = repo.NewIndexFile() + charts := []struct { + name string + version string + url string + digest string + created time.Time + }{ + {name: "chart", version: "0.0.1", url: "http://example.com/charts", digest: "sha256:1234567890"}, + {name: "chart", version: "0.1.0", url: "http://example.com/charts", digest: "sha256:1234567890abc"}, + {name: "chart", version: "0.1.1", url: "http://example.com/charts", digest: "sha256:1234567890abc"}, + {name: "chart", version: "0.1.5+b.min.minute", url: "http://example.com/charts", digest: "sha256:1234567890abc", created: now.Add(-time.Minute)}, + {name: "chart", version: "0.1.5+a.min.hour", url: "http://example.com/charts", digest: "sha256:1234567890abc", created: now.Add(-time.Hour)}, + {name: "chart", version: "0.1.5+c.now", url: "http://example.com/charts", digest: "sha256:1234567890abc", created: now}, + {name: "chart", version: "0.2.0", url: "http://example.com/charts", digest: "sha256:1234567890abc"}, + {name: "chart", version: "1.0.0", url: "http://example.com/charts", digest: "sha256:1234567890abc"}, + {name: "chart", version: "1.1.0-rc.1", 
url: "http://example.com/charts", digest: "sha256:1234567890abc"}, + } + for _, c := range charts { + g.Expect(r.Index.MustAdd( + &chart.Metadata{Name: c.name, Version: c.version}, + fmt.Sprintf("%s-%s.tgz", c.name, c.version), c.url, c.digest), + ).To(Succeed()) + if !c.created.IsZero() { + r.Index.Entries["chart"][len(r.Index.Entries["chart"])-1].Created = c.created + } + } + r.Index.SortEntries() + + tests := []struct { + name string + chartName string + chartVersion string + wantVersion string + wantErr string + }{ + { + name: "exact match", + chartName: "chart", + chartVersion: "0.0.1", + wantVersion: "0.0.1", + }, + { + name: "stable version", + chartName: "chart", + chartVersion: "", + wantVersion: "1.0.0", + }, + { + name: "stable version (asterisk)", + chartName: "chart", + chartVersion: "*", + wantVersion: "1.0.0", + }, + { + name: "semver range", + chartName: "chart", + chartVersion: "<1.0.0", + wantVersion: "0.2.0", + }, + { + name: "unfulfilled range", + chartName: "chart", + chartVersion: ">2.0.0", + wantErr: "no 'chart' chart with version matching '>2.0.0' found", + }, + { + name: "invalid chart", + chartName: "non-existing", + wantErr: repo.ErrNoChartName.Error(), + }, + { + name: "match newest if ambiguous", + chartName: "chart", + chartVersion: "0.1.5", + wantVersion: "0.1.5+c.now", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + cv, err := r.GetChartVersion(tt.chartName, tt.chartVersion) + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + g.Expect(cv).To(BeNil()) + return + } + g.Expect(cv).ToNot(BeNil()) + g.Expect(cv.Metadata.Name).To(Equal(tt.chartName)) + g.Expect(cv.Metadata.Version).To(Equal(tt.wantVersion)) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestChartRepository_DownloadChart(t *testing.T) { + tests := []struct { + name string + url string + chartVersion *repo.ChartVersion + wantURL string + wantErr 
bool + }{ + { + name: "relative URL", + url: "https://example.com", + chartVersion: &repo.ChartVersion{ + Metadata: &chart.Metadata{Name: "chart"}, + URLs: []string{"charts/foo-1.0.0.tgz"}, + }, + wantURL: "https://example.com/charts/foo-1.0.0.tgz", + }, + { + name: "no chart URL", + chartVersion: &repo.ChartVersion{Metadata: &chart.Metadata{Name: "chart"}}, + wantErr: true, + }, + { + name: "invalid chart URL", + chartVersion: &repo.ChartVersion{ + Metadata: &chart.Metadata{Name: "chart"}, + URLs: []string{"https://ex ample.com/charts/foo-1.0.0.tgz"}, + }, + wantErr: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + + mg := mockGetter{} + r := &ChartRepository{ + URL: tt.url, + Client: &mg, + } + res, err := r.DownloadChart(tt.chartVersion) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(res).To(BeNil()) + return + } + g.Expect(mg.LastCalledURL).To(Equal(tt.wantURL)) + g.Expect(res).ToNot(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestChartRepository_CacheIndex(t *testing.T) { + g := NewWithT(t) + + mg := mockGetter{Response: []byte("foo")} + + r := newChartRepository() + r.URL = "https://example.com" + r.Client = &mg + r.digests["key"] = "value" + + err := r.CacheIndex() + g.Expect(err).To(Not(HaveOccurred())) + + g.Expect(r.Path).ToNot(BeEmpty()) + t.Cleanup(func() { _ = os.Remove(r.Path) }) + + g.Expect(r.Path).To(BeARegularFile()) + b, _ := os.ReadFile(r.Path) + g.Expect(b).To(Equal(mg.Response)) + + g.Expect(r.digests).To(BeEmpty()) +} + +func TestChartRepository_ToJSON(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Path = chartmuseumTestFile + + _, err := r.ToJSON() + g.Expect(err).To(HaveOccurred()) + + g.Expect(r.LoadFromPath()).To(Succeed()) + b, err := r.ToJSON() + g.Expect(err).ToNot(HaveOccurred()) + + jsonBytes, err := os.ReadFile(chartmuseumJSONTestFile) + jsonBytes = bytes.TrimRight(jsonBytes, "\n") + 
g.Expect(err).To(Not(HaveOccurred())) + g.Expect(string(b)).To(Equal(string(jsonBytes))) +} + +func TestChartRepository_DownloadIndex(t *testing.T) { + g := NewWithT(t) + + b, err := os.ReadFile(chartmuseumTestFile) + g.Expect(err).ToNot(HaveOccurred()) + + mg := mockGetter{Response: b} + r := &ChartRepository{ + URL: "https://example.com", + Client: &mg, + RWMutex: &sync.RWMutex{}, + } + + t.Run("download index", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, helm.MaxIndexSize)).To(Succeed()) + g.Expect(buf.Bytes()).To(Equal(b)) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + g.Expect(err).To(BeNil()) + }) + + t.Run("download index size error", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, int64(len(b)-1))).To(HaveOccurred()) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + }) +} + +func TestChartRepository_StrategicallyLoadIndex(t *testing.T) { + t.Run("loads from path", func(t *testing.T) { + g := NewWithT(t) + + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = i + + err := r.StrategicallyLoadIndex() + g.Expect(err).To(Succeed()) + g.Expect(r.Index).ToNot(BeNil()) + }) + + t.Run("loads from client", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Client = &mockGetter{ + Response: []byte(`apiVersion: v1`), + } + t.Cleanup(func() { + _ = os.Remove(r.Path) + }) + + err := r.StrategicallyLoadIndex() + g.Expect(err).To(Succeed()) + g.Expect(r.Path).ToNot(BeEmpty()) + g.Expect(r.Index).ToNot(BeNil()) + }) + + t.Run("skips if index is already loaded", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Index = repo.NewIndexFile() + + g.Expect(r.StrategicallyLoadIndex()).To(Succeed()) + }) +} + +func TestChartRepository_LoadFromPath(t *testing.T) { + t.Run("loads index", func(t *testing.T) { + g := 
NewWithT(t) + + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = i + + g.Expect(r.LoadFromPath()).To(Succeed()) + g.Expect(r.Index).ToNot(BeNil()) + }) + + t.Run("no cache path", func(t *testing.T) { + g := NewWithT(t) + + err := newChartRepository().LoadFromPath() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cache path")) + }) + + t.Run("index load error", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Path = filepath.Join(t.TempDir(), "index.yaml") + + err := r.LoadFromPath() + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue()) + }) +} + +func TestChartRepository_Digest(t *testing.T) { + t.Run("with algorithm", func(t *testing.T) { + g := NewWithT(t) + + p := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(repo.NewIndexFile().WriteFile(p, 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = p + + for _, algo := range []digest.Algorithm{digest.SHA256, digest.SHA512} { + t.Run(algo.String(), func(t *testing.T) { + g := NewWithT(t) + + d := r.Digest(algo) + g.Expect(d).ToNot(BeEmpty()) + g.Expect(d.Algorithm()).To(Equal(algo)) + g.Expect(r.digests[algo]).To(Equal(d)) + }) + } + }) + + t.Run("without path", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.Digest(digest.SHA256)).To(BeEmpty()) + }) + + t.Run("from cache", func(t *testing.T) { + g := NewWithT(t) + + algo := digest.SHA256 + expect := digest.Digest("sha256:fake") + + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + + r := newChartRepository() + r.Path = i + r.digests[algo] = expect + + g.Expect(r.Digest(algo)).To(Equal(expect)) + }) +} + +func TestChartRepository_HasIndex(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.HasIndex()).To(BeFalse()) 
+ r.Index = repo.NewIndexFile() + g.Expect(r.HasIndex()).To(BeTrue()) +} + +func TestChartRepository_HasFile(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.HasFile()).To(BeFalse()) + + i := filepath.Join(t.TempDir(), "index.yaml") + g.Expect(os.WriteFile(i, []byte(`apiVersion: v1`), 0o600)).To(Succeed()) + r.Path = i + g.Expect(r.HasFile()).To(BeTrue()) +} + +func TestChartRepository_Clear(t *testing.T) { + t.Run("without index", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + g.Expect(r.Clear()).To(Succeed()) + }) + + t.Run("with index", func(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.Index = repo.NewIndexFile() + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Index).To(BeNil()) + }) + + t.Run("with index and cached path", func(t *testing.T) { + g := NewWithT(t) + + f, err := os.CreateTemp(t.TempDir(), "index-*.yaml") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).To(Succeed()) + + r := newChartRepository() + r.Path = f.Name() + r.Index = repo.NewIndexFile() + r.digests["key"] = "value" + r.cached = true + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Index).To(BeNil()) + g.Expect(r.Path).To(BeEmpty()) + g.Expect(r.digests).To(BeEmpty()) + g.Expect(r.cached).To(BeFalse()) + }) + + t.Run("with path", func(t *testing.T) { + g := NewWithT(t) + + f, err := os.CreateTemp(t.TempDir(), "index-*.yaml") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).To(Succeed()) + + r := newChartRepository() + r.Path = f.Name() + r.digests["key"] = "value" + + g.Expect(r.Clear()).To(Succeed()) + g.Expect(r.Path).ToNot(BeEmpty()) + g.Expect(r.Path).To(BeARegularFile()) + g.Expect(r.digests).To(BeEmpty()) + }) +} + +func TestChartRepository_Invalidate(t *testing.T) { + g := NewWithT(t) + + r := newChartRepository() + r.digests["key"] = "value" + + r.Invalidate() + g.Expect(r.digests).To(BeEmpty()) +} + +func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { + g := NewWithT(t) + 
+ g.Expect(i.Entries).ToNot(BeNil()) + g.Expect(i.Entries).To(HaveLen(4), "expected 4 entries in index file") + + alpine, ok := i.Entries["alpine"] + g.Expect(ok).To(BeTrue(), "expected 'alpine' entry to exist") + g.Expect(alpine).To(HaveLen(1), "'alpine' should have 1 entry") + + nginx, ok := i.Entries["nginx"] + g.Expect(ok).To(BeTrue(), "expected 'nginx' entry to exist") + g.Expect(nginx).To(HaveLen(2), "'nginx' should have 2 entries") + + broken, ok := i.Entries["xChartWithDuplicateDependenciesAndMissingAlias"] + g.Expect(ok).To(BeTrue(), "expected 'xChartWithDuplicateDependenciesAndMissingAlias' entry to exist") + g.Expect(broken).To(HaveLen(1), "'xChartWithDuplicateDependenciesAndMissingAlias' should have 1 entries") + + expects := []*repo.ChartVersion{ + { + Metadata: &chart.Metadata{ + Name: "alpine", + Description: "string", + Version: "1.0.0", + Keywords: []string{"linux", "alpine", "small", "sumtin"}, + Home: "https://github.com/something", + }, + URLs: []string{ + "https://kubernetes-charts.storage.googleapis.com/alpine-1.0.0.tgz", + "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, + { + Metadata: &chart.Metadata{ + Name: "nginx", + Description: "string", + Version: "0.2.0", + Keywords: []string{"popular", "web server", "proxy"}, + Home: "https://github.com/something/else", + }, + URLs: []string{ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, + { + Metadata: &chart.Metadata{ + Name: "nginx", + Description: "string", + Version: "0.1.0", + Keywords: []string{"popular", "web server", "proxy"}, + Home: "https://github.com/something", + }, + URLs: []string{ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.1.0.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, + { + Metadata: &chart.Metadata{ + Name: "xChartWithDuplicateDependenciesAndMissingAlias", + Description: 
"string", + Version: "1.2.3", + Keywords: []string{"broken", "still accepted"}, + Home: "https://example.com/something", + Dependencies: []*chart.Dependency{ + {Name: "kube-rbac-proxy", Version: "0.9.1"}, + }, + }, + URLs: []string{ + "https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, + } + tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1], broken[0]} + + for i, tt := range tests { + expect := expects[i] + g.Expect(tt.Name).To(Equal(expect.Name)) + g.Expect(tt.Description).To(Equal(expect.Description)) + g.Expect(tt.Version).To(Equal(expect.Version)) + g.Expect(tt.Digest).To(Equal(expect.Digest)) + g.Expect(tt.Home).To(Equal(expect.Home)) + g.Expect(tt.URLs).To(ContainElements(expect.URLs)) + g.Expect(tt.Keywords).To(ContainElements(expect.Keywords)) + g.Expect(tt.Dependencies).To(ContainElements(expect.Dependencies)) + } +} + +// This code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index_test.go#L601 +// and refers to: https://github.com/helm/helm/issues/12748 +func TestIgnoreSkippableChartValidationError(t *testing.T) { + type TestCase struct { + Input error + ErrorSkipped bool + } + testCases := map[string]TestCase{ + "nil": { + Input: nil, + }, + "generic_error": { + Input: fmt.Errorf("foo"), + }, + "non_skipped_validation_error": { + Input: chart.ValidationError("chart.metadata.type must be application or library"), + }, + "skipped_validation_error": { + Input: chart.ValidationErrorf("more than one dependency with name or alias %q", "foo"), + ErrorSkipped: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := ignoreSkippableChartValidationError(tc.Input) + + if tc.Input == nil { + if result != nil { + t.Error("expected nil result for nil input") + } + return + } + + if tc.ErrorSkipped { + if result != nil { + t.Error("expected nil result for skipped error") + } + return + } + + if tc.Input != 
result { + t.Error("expected the result equal to input") + } + + }) + } +} + +var indexWithFirstVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + version: 0..1.0 + description: string + home: https://github.com/something + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" +` +var indexWithLastVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + version: 0..1.0 + description: string + home: https://github.com/something + digest: "sha256:1234567890abcdef" +` + +func TestIndexFromBytes_InvalidEntries(t *testing.T) { + tests := []struct { + source string + data string + }{ + { + source: "indexWithFirstVersionInvalid", + data: indexWithFirstVersionInvalid, + }, + { + source: "indexWithLastVersionInvalid", + data: indexWithLastVersionInvalid, + }, + } + for _, tc := range tests { + t.Run(tc.source, func(t *testing.T) { + idx, err := IndexFromBytes([]byte(tc.data)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + cvs := idx.Entries["nginx"] + if len(cvs) == 0 { + t.Error("expected one chart version not to be filtered out") + } + for _, v := range cvs { + if v.Version == "0..1.0" { + t.Error("malformed version was not filtered out") + } + } + }) + } +} diff --git a/internal/helm/repository/errors.go b/internal/helm/repository/errors.go new file mode 
100644 index 000000000..d8d57059e --- /dev/null +++ b/internal/helm/repository/errors.go @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +// ErrReference indicates an invalid chart reference. +type ErrReference struct { + Err error +} + +// Error implements the error interface. +func (er *ErrReference) Error() string { + return er.Err.Error() +} + +// Unwrap returns the underlying error. +func (er *ErrReference) Unwrap() error { + return er.Err +} + +// ErrExternal is a generic error for errors related to external API calls. +type ErrExternal struct { + Err error +} + +// Error implements the error interface. +func (ee *ErrExternal) Error() string { + return ee.Err.Error() +} + +// Unwrap returns the underlying error. +func (ee *ErrExternal) Unwrap() error { + return ee.Err +} diff --git a/internal/helm/repository/oci_chart_repository.go b/internal/helm/repository/oci_chart_repository.go new file mode 100644 index 000000000..2bed964a2 --- /dev/null +++ b/internal/helm/repository/oci_chart_repository.go @@ -0,0 +1,401 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "net/url" + "os" + "path" + "sort" + "strings" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" + + "github.com/Masterminds/semver/v3" + "github.com/google/go-containerregistry/pkg/name" + + "github.com/fluxcd/pkg/http/transport" + "github.com/fluxcd/pkg/version" + "github.com/fluxcd/source-controller/internal/oci" +) + +// RegistryClient is an interface for interacting with OCI registries +// It is used by the OCIChartRepository to retrieve chart versions +// from OCI registries +type RegistryClient interface { + Login(host string, opts ...registry.LoginOption) error + Logout(host string, opts ...registry.LogoutOption) error + Tags(url string) ([]string, error) +} + +// OCIChartRepository represents a Helm chart repository, and the configuration +// required to download the repository tags and charts from the repository. +// All methods are thread safe unless defined otherwise. +type OCIChartRepository struct { + // URL is the location of the repository. + URL url.URL + // Client to use while accessing the repository's contents. + Client getter.Getter + // Options to configure the Client with while downloading tags + // or a chart from the URL. + Options []getter.Option + + tlsConfig *tls.Config + + // RegistryClient is a client to use while downloading tags or charts from a registry. 
+ RegistryClient RegistryClient + + // credentialsFile is a temporary credentials file to use while downloading tags or charts from a registry. + credentialsFile string + + // certificatesStore is a temporary store to use while downloading tags or charts from a registry. + certificatesStore string + + // verifiers is a list of verifiers to use when verifying a chart. + verifiers []oci.Verifier + + // insecureHTTP indicates that the chart is hosted on an insecure HTTP registry. + insecureHTTP bool +} + +// OCIChartRepositoryOption is a function that can be passed to NewOCIChartRepository +// to configure an OCIChartRepository. +type OCIChartRepositoryOption func(*OCIChartRepository) error + +// WithVerifiers returns a ChartRepositoryOption that will set the chart verifiers +func WithVerifiers(verifiers []oci.Verifier) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.verifiers = verifiers + return nil + } +} + +func WithInsecureHTTP() OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.insecureHTTP = true + return nil + } +} + +// WithOCIRegistryClient returns a ChartRepositoryOption that will set the registry client +func WithOCIRegistryClient(client RegistryClient) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.RegistryClient = client + return nil + } +} + +// WithOCIGetter returns a ChartRepositoryOption that will set the getter.Getter +func WithOCIGetter(providers getter.Providers) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + c, err := providers.ByScheme(r.URL.Scheme) + if err != nil { + return err + } + r.Client = c + return nil + } +} + +// WithOCIGetterOptions returns a ChartRepositoryOption that will set the getter.Options +func WithOCIGetterOptions(getterOpts []getter.Option) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.Options = getterOpts + return nil + } +} + +// WithCredentialsFile returns a ChartRepositoryOption 
that will set the credentials file +func WithCredentialsFile(credentialsFile string) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.credentialsFile = credentialsFile + return nil + } +} + +// WithCertificatesStore returns a ChartRepositoryOption that will set the certificates store +func WithCertificatesStore(store string) OCIChartRepositoryOption { + return func(r *OCIChartRepository) error { + r.certificatesStore = store + return nil + } +} + +// NewOCIChartRepository constructs and returns a new ChartRepository with +// the ChartRepository.Client configured to the getter.Getter for the +// repository URL scheme. It returns an error on URL parsing failures. +// It assumes that the url scheme has been validated to be an OCI scheme. +func NewOCIChartRepository(repositoryURL string, chartRepoOpts ...OCIChartRepositoryOption) (*OCIChartRepository, error) { + u, err := url.Parse(repositoryURL) + if err != nil { + return nil, err + } + + r := &OCIChartRepository{} + r.URL = *u + for _, opt := range chartRepoOpts { + if err := opt(r); err != nil { + return nil, err + } + } + + return r, nil +} + +// GetChartVersion returns the repo.ChartVersion for the given name, the version is expected +// to be a semver.Constraints compatible string. If version is empty, the latest +// stable version will be returned and prerelease versions will be ignored. 
+// adapted from https://github.com/helm/helm/blob/49819b4ef782e80b0c7f78c30bd76b51ebb56dc8/pkg/downloader/chart_downloader.go#L162 +func (r *OCIChartRepository) GetChartVersion(name, ver string) (*repo.ChartVersion, error) { + cv, err := r.getChartVersion(name, ver) + if err != nil { + return nil, &ErrExternal{Err: err} + } + return cv, nil +} + +func (r *OCIChartRepository) getChartVersion(name, ver string) (*repo.ChartVersion, error) { + cpURL := r.URL + cpURL.Path = path.Join(cpURL.Path, name) + + // if ver is a valid semver version, take a shortcut here so we don't need to list all tags which can be an + // expensive operation. + if _, err := version.ParseVersion(ver); err == nil { + return &repo.ChartVersion{ + URLs: []string{fmt.Sprintf("%s:%s", cpURL.String(), ver)}, + Metadata: &chart.Metadata{ + Name: name, + Version: ver, + }, + }, nil + } + + // ver doesn't denote a concrete version so we interpret it as a semver range and try to find the best-matching + // version from the list of tags in the registry. + + cvs, err := r.getTags(cpURL.String()) + if err != nil { + return nil, fmt.Errorf("could not get tags for %q: %s", name, err) + } + + if len(cvs) == 0 { + return nil, fmt.Errorf("unable to locate any tags in provided repository: %s", name) + } + + // Determine if version provided + // If empty, try to get the highest available tag + // If exact version, try to find it + // If semver constraint string, try to find a match + tag, err := getLastMatchingVersionOrConstraint(cvs, ver) + return &repo.ChartVersion{ + URLs: []string{fmt.Sprintf("%s:%s", cpURL.String(), tag)}, + Metadata: &chart.Metadata{ + Name: name, + Version: tag, + }, + }, err +} + +// This function shall be called for OCI registries only +// It assumes that the ref has been validated to be an OCI reference. 
+func (r *OCIChartRepository) getTags(ref string) ([]string, error) { + // Retrieve list of repository tags + tags, err := r.RegistryClient.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", registry.OCIScheme))) + if err != nil { + return nil, fmt.Errorf("could not fetch tags for %q: %s", ref, err) + } + if len(tags) == 0 { + return nil, fmt.Errorf("unable to locate any tags in provided repository: %s", ref) + } + + return tags, nil +} + +// DownloadChart confirms the given repo.ChartVersion has a downloadable URL, +// and then attempts to download the chart using the Client and Options of the +// ChartRepository. It returns a bytes.Buffer containing the chart data. +// In case of an OCI hosted chart, this function assumes that the chartVersion url is valid. +func (r *OCIChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) { + if len(chart.URLs) == 0 { + return nil, fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) + } + + ref := chart.URLs[0] + u, err := url.Parse(ref) + if err != nil { + err = fmt.Errorf("invalid chart URL format '%s': %w", ref, err) + return nil, err + } + + t := transport.NewOrIdle(r.tlsConfig) + clientOpts := append(r.Options, getter.WithTransport(t)) + defer transport.Release(t) + + // trim the oci scheme prefix if needed + b, err := r.Client.Get(strings.TrimPrefix(u.String(), fmt.Sprintf("%s://", registry.OCIScheme)), clientOpts...) + if err != nil { + return nil, fmt.Errorf("failed to get '%s': %w", ref, err) + } + return b, nil +} + +// Login attempts to login to the OCI registry. +// It returns an error on failure. +func (r *OCIChartRepository) Login(opts ...registry.LoginOption) error { + err := r.RegistryClient.Login(r.URL.Host, opts...) + if err != nil { + return err + } + return nil +} + +// Logout attempts to logout from the OCI registry. +// It returns an error on failure. 
+func (r *OCIChartRepository) Logout() error { + err := r.RegistryClient.Logout(r.URL.Host) + if err != nil { + return err + } + return nil +} + +// HasCredentials returns true if the OCIChartRepository has credentials. +func (r *OCIChartRepository) HasCredentials() bool { + return r.credentialsFile != "" +} + +// Clear deletes the OCI registry credentials file. +func (r *OCIChartRepository) Clear() error { + var errs error + // clean the credentials file if it exists + if r.credentialsFile != "" { + if err := os.Remove(r.credentialsFile); err != nil { + errs = errors.Join(errs, err) + } + } + r.credentialsFile = "" + + // clean the certificates store if it exists + if r.certificatesStore != "" { + if err := os.RemoveAll(r.certificatesStore); err != nil { + errs = errors.Join(errs, err) + } + } + r.certificatesStore = "" + + return errs +} + +// getLastMatchingVersionOrConstraint returns the last version that matches the given version string. +// If the version string is empty, the highest available version is returned. 
+func getLastMatchingVersionOrConstraint(cvs []string, ver string) (string, error) { + // Check for exact matches first + if ver != "" { + for _, cv := range cvs { + if ver == cv { + return cv, nil + } + } + } + + // Continue to look for a (semantic) version match + verConstraint, err := semver.NewConstraint("*") + if err != nil { + return "", err + } + latestStable := ver == "" || ver == "*" + if !latestStable { + verConstraint, err = semver.NewConstraint(ver) + if err != nil { + return "", err + } + } + + matchingVersions := make([]*semver.Version, 0, len(cvs)) + for _, cv := range cvs { + v, err := version.ParseVersion(cv) + if err != nil { + continue + } + + if !verConstraint.Check(v) { + continue + } + + matchingVersions = append(matchingVersions, v) + } + if len(matchingVersions) == 0 { + return "", fmt.Errorf("could not locate a version matching provided version string %s", ver) + } + + // Sort versions + sort.Sort(sort.Reverse(semver.Collection(matchingVersions))) + + return matchingVersions[0].Original(), nil +} + +// VerifyChart verifies the chart against a signature. +// Supports signature verification using either cosign or notation providers. +// If no signature is provided, when cosign is used, a keyless verification is performed. +// The verification result is returned as a VerificationResult and any error encountered. +func (r *OCIChartRepository) VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) { + if len(r.verifiers) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("no verifiers available") + } + + if len(chart.URLs) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) + } + + var nameOpts []name.Option + if r.insecureHTTP { + nameOpts = append(nameOpts, name.Insecure) + } + + ref, err := name.ParseReference(strings.TrimPrefix(chart.URLs[0], fmt.Sprintf("%s://", registry.OCIScheme)), nameOpts...) 
+ if err != nil { + return oci.VerificationResultFailed, fmt.Errorf("invalid chart reference: %s", err) + } + + verificationResult := oci.VerificationResultFailed + + // verify the chart + for _, verifier := range r.verifiers { + result, err := verifier.Verify(ctx, ref) + if err != nil { + return result, fmt.Errorf("failed to verify %s: %w", chart.URLs[0], err) + } + if result == oci.VerificationResultSuccess { + return result, nil + } + verificationResult = result + } + + if verificationResult == oci.VerificationResultIgnored { + return verificationResult, nil + } + + return oci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref.Name()) +} diff --git a/internal/helm/repository/oci_chart_repository_test.go b/internal/helm/repository/oci_chart_repository_test.go new file mode 100644 index 000000000..504d44e3e --- /dev/null +++ b/internal/helm/repository/oci_chart_repository_test.go @@ -0,0 +1,274 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "bytes" + "fmt" + "net/url" + "path" + "strings" + "testing" + + . 
"github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/chart" + helmgetter "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +type OCIMockGetter struct { + Response []byte + LastCalledURL string +} + +func (g *OCIMockGetter) Get(u string, _ ...helmgetter.Option) (*bytes.Buffer, error) { + r := g.Response + g.LastCalledURL = u + return bytes.NewBuffer(r), nil +} + +type mockRegistryClient struct { + tags []string + LastCalledURL string +} + +func (m *mockRegistryClient) Tags(urlStr string) ([]string, error) { + m.LastCalledURL = urlStr + return m.tags, nil +} + +func (m *mockRegistryClient) Login(url string, opts ...registry.LoginOption) error { + m.LastCalledURL = url + return nil +} + +func (m *mockRegistryClient) Logout(url string, opts ...registry.LogoutOption) error { + m.LastCalledURL = url + return nil +} + +func TestNewOCIChartRepository(t *testing.T) { + registryClient := &mockRegistryClient{} + url := "oci://localhost:5000/my_repo" + providers := helmgetter.Providers{ + helmgetter.Provider{ + Schemes: []string{"oci"}, + New: helmgetter.NewOCIGetter, + }, + } + options := []helmgetter.Option{helmgetter.WithBasicAuth("username", "password")} + t.Run("should construct chart registry", func(t *testing.T) { + g := NewWithT(t) + r, err := NewOCIChartRepository(url, WithOCIGetter(providers), WithOCIGetterOptions(options), WithOCIRegistryClient(registryClient)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r).ToNot(BeNil()) + g.Expect(r.URL.Host).To(Equal("localhost:5000")) + g.Expect(r.Client).ToNot(BeNil()) + g.Expect(r.Options).To(Equal(options)) + g.Expect(r.RegistryClient).To(Equal(registryClient)) + }) + + t.Run("should return error on invalid url", func(t *testing.T) { + g := NewWithT(t) + r, err := NewOCIChartRepository("oci://localhost:5000 /my_repo", WithOCIGetter(providers), WithOCIGetterOptions(options), WithOCIRegistryClient(registryClient)) + g.Expect(err).To(HaveOccurred()) + 
g.Expect(r).To(BeNil()) + }) + +} + +func TestOCIChartRepository_Get(t *testing.T) { + registryClient := &mockRegistryClient{ + tags: []string{ + "0.0.1", + "0.1.0", + "0.1.1", + "0.1.5+b.min.minute", + "0.1.5+a.min.hour", + "0.1.5+c.now", + "0.2.0", + "0.9.0", + "0.10.0", + "1.0.0", + "1.1.0-rc.1", + }, + } + + providers := helmgetter.Providers{ + helmgetter.Provider{ + Schemes: []string{"oci"}, + New: helmgetter.NewOCIGetter, + }, + } + testURL := "oci://localhost:5000/my_repo" + + testCases := []struct { + name string + registryClient RegistryClient + url string + version string + expected string + expectedErr string + }{ + { + name: "should return latest stable version", + registryClient: registryClient, + version: "", + url: testURL, + expected: "1.0.0", + }, + { + name: "should return latest stable version (asterisk)", + registryClient: registryClient, + version: "*", + url: testURL, + expected: "1.0.0", + }, + { + name: "should return latest stable version (semver range)", + registryClient: registryClient, + version: ">=0.1.5", + url: testURL, + expected: "1.0.0", + }, + { + name: "should return 0.2.0 (semver range)", + registryClient: registryClient, + version: "0.2.x", + url: testURL, + expected: "0.2.0", + }, + { + name: "should return a perfect match", + registryClient: nil, + version: "0.1.0", + url: testURL, + expected: "0.1.0", + }, + { + name: "should return 0.10.0", + registryClient: registryClient, + version: "0.*", + url: testURL, + expected: "0.10.0", + }, + { + name: "should an error for unfulfilled range", + registryClient: registryClient, + version: ">2.0.0", + url: testURL, + expectedErr: "could not locate a version matching provided version string >2.0.0", + }, + { + name: "shouldn't error out with trailing slash", + registryClient: registryClient, + version: "", + url: "oci://localhost:5000/my_repo/", + expected: "1.0.0", + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + r, err := 
NewOCIChartRepository(tc.url, WithOCIRegistryClient(tc.registryClient), WithOCIGetter(providers)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r).ToNot(BeNil()) + + chart := "podinfo" + cv, err := r.GetChartVersion(chart, tc.version) + if tc.expectedErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tc.expectedErr)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + u, err := url.Parse(tc.url) + g.Expect(err).ToNot(HaveOccurred()) + u.Path = path.Join(u.Path, chart) + g.Expect(cv.URLs[0]).To(Equal(fmt.Sprintf("%s:%s", u.String(), tc.expected))) + g.Expect(registryClient.LastCalledURL).To(Equal(strings.TrimPrefix(u.String(), fmt.Sprintf("%s://", registry.OCIScheme)))) + }) + } +} + +func TestOCIChartRepository_DownloadChart(t *testing.T) { + testCases := []struct { + name string + url string + chartVersion *repo.ChartVersion + expected string + expectedErr bool + }{ + { + name: "should download chart", + url: "oci://localhost:5000/my_repo", + chartVersion: &repo.ChartVersion{ + Metadata: &chart.Metadata{Name: "chart"}, + URLs: []string{"oci://localhost:5000/my_repo/podinfo:1.0.0"}, + }, + expected: "localhost:5000/my_repo/podinfo:1.0.0", + }, + { + name: "no chart URL", + url: "", + chartVersion: &repo.ChartVersion{Metadata: &chart.Metadata{Name: "chart"}}, + expectedErr: true, + }, + { + name: "invalid chart URL", + url: "oci://localhost:5000/my_repo", + chartVersion: &repo.ChartVersion{ + Metadata: &chart.Metadata{Name: "chart"}, + URLs: []string{"oci://localhost:5000 /my_repo/podinfo:1.0.0"}, + }, + expectedErr: true, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + u, err := url.Parse(tc.url) + g.Expect(err).ToNot(HaveOccurred()) + + mg := OCIMockGetter{} + r := OCIChartRepository{ + Client: &mg, + URL: *u, + } + + res, err := r.DownloadChart(tc.chartVersion) + if tc.expectedErr { + g.Expect(err).To(HaveOccurred()) + return + } + + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(mg.LastCalledURL).To(Equal(tc.expected)) + g.Expect(res).ToNot(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} diff --git a/internal/helm/repository/repository.go b/internal/helm/repository/repository.go new file mode 100644 index 000000000..6cee5f658 --- /dev/null +++ b/internal/helm/repository/repository.go @@ -0,0 +1,40 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "bytes" + "context" + + "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/oci" +) + +// Downloader is used to download a chart from a remote Helm repository or OCI Helm repository. +type Downloader interface { + // GetChartVersion returns the repo.ChartVersion for the given name and version + // from the remote Helm repository or OCI Helm repository. + GetChartVersion(name, version string) (*repo.ChartVersion, error) + // DownloadChart downloads a chart from the remote Helm repository or OCI Helm repository. + DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) + // VerifyChart verifies the chart against a signature. + VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) + // Clear removes all temporary files created by the downloader, caching the files if the cache is configured, + // and calling garbage collector to remove unused files. 
+ Clear() error +} diff --git a/internal/helm/repository/utils.go b/internal/helm/repository/utils.go new file mode 100644 index 000000000..b784dec0d --- /dev/null +++ b/internal/helm/repository/utils.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "fmt" + "net/url" + "strings" + + helmreg "helm.sh/helm/v3/pkg/registry" +) + +const ( + alias = "@" +) + +var ( + // errInvalidDepURL is returned when the dependency URL is not supported + errInvalidDepURL = fmt.Errorf("invalid dependency repository URL") + // errInvalidAliasedDep is returned when the dependency URL is an alias + errInvalidAliasedDep = fmt.Errorf("aliased repository dependency is not supported") +) + +// NormalizeURL normalizes a ChartRepository URL by its scheme. +func NormalizeURL(repositoryURL string) (string, error) { + if repositoryURL == "" { + return "", nil + } + u, err := url.Parse(repositoryURL) + if err != nil { + return "", err + } + + if u.Scheme == helmreg.OCIScheme { + u.Path = strings.TrimRight(u.Path, "/") + // we perform the same operation on u.RawPath so that it will be a valid encoding + // of u.Path. This allows u.EscapedPath() (which is used in computing u.String()) to return + // the correct value when the path is url encoded. 
+ // ref: https://pkg.go.dev/net/url#URL.EscapedPath + u.RawPath = strings.TrimRight(u.RawPath, "/") + return u.String(), nil + } + + u.Path = strings.TrimRight(u.Path, "/") + "/" + u.RawPath = strings.TrimRight(u.RawPath, "/") + "/" + return u.String(), nil +} + +// ValidateDepURL returns an error if the given depended repository URL declaration is not supported +// The reason for this is that the dependency manager will not be able to resolve the alias declaration +// e.g. repository: "@fantastic-charts" +func ValidateDepURL(repositoryURL string) error { + switch { + case strings.HasPrefix(repositoryURL, helmreg.OCIScheme): + return nil + case strings.HasPrefix(repositoryURL, "https://") || strings.HasPrefix(repositoryURL, "http://"): + return nil + case strings.HasPrefix(repositoryURL, alias): + return fmt.Errorf("%w: %s", errInvalidAliasedDep, repositoryURL) + default: + return fmt.Errorf("%w: %s", errInvalidDepURL, repositoryURL) + } +} diff --git a/internal/helm/repository/utils_test.go b/internal/helm/repository/utils_test.go new file mode 100644 index 000000000..a1fa2dcaa --- /dev/null +++ b/internal/helm/repository/utils_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repository + +import ( + "testing" + + . 
"github.com/onsi/gomega" +) + +func TestNormalizeURL(t *testing.T) { + tests := []struct { + name string + url string + want string + wantErr bool + }{ + { + name: "with slash", + url: "http://example.com/", + want: "http://example.com/", + }, + { + name: "without slash", + url: "http://example.com", + want: "http://example.com/", + }, + { + name: "double slash", + url: "http://example.com//", + want: "http://example.com/", + }, + { + name: "oci with slash", + url: "oci://example.com/", + want: "oci://example.com", + }, + { + name: "oci double slash", + url: "oci://example.com//", + want: "oci://example.com", + }, + { + name: "url with query", + url: "http://example.com?st=pr", + want: "http://example.com/?st=pr", + }, + { + name: "url with slash and query", + url: "http://example.com/?st=pr", + want: "http://example.com/?st=pr", + }, + { + name: "url with encoded path", + url: "http://example.com/next%2Fpath", + want: "http://example.com/next%2Fpath/", + }, + { + name: "url with encoded path and slash", + url: "http://example.com/next%2Fpath/", + want: "http://example.com/next%2Fpath/", + }, + { + name: "empty url", + url: "", + want: "", + }, + { + name: "bad url", + url: "://badurl.", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := NormalizeURL(tt.url) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).To(Not(HaveOccurred())) + g.Expect(got).To(Equal(tt.want)) + }) + } +} diff --git a/internal/helm/repository_test.go b/internal/helm/repository_test.go deleted file mode 100644 index 469186ad2..000000000 --- a/internal/helm/repository_test.go +++ /dev/null @@ -1,410 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helm - -import ( - "bytes" - "io/ioutil" - "net/url" - "reflect" - "strings" - "testing" - "time" - - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/getter" - "helm.sh/helm/v3/pkg/repo" -) - -const ( - testfile = "testdata/local-index.yaml" - chartmuseumtestfile = "testdata/chartmuseum-index.yaml" - unorderedtestfile = "testdata/local-index-unordered.yaml" - indexWithDuplicates = ` -apiVersion: v1 -entries: - nginx: - - urls: - - https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz - name: nginx - description: string - version: 0.2.0 - home: https://github.com/something/else - digest: "sha256:1234567890abcdef" - nginx: - - urls: - - https://kubernetes-charts.storage.googleapis.com/alpine-1.0.0.tgz - - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz - name: alpine - description: string - version: 1.0.0 - home: https://github.com/something - digest: "sha256:1234567890abcdef" -` -) - -func TestNewChartRepository(t *testing.T) { - repositoryURL := "https://example.com" - providers := getter.Providers{ - getter.Provider{ - Schemes: []string{"https"}, - New: getter.NewHTTPGetter, - }, - } - options := []getter.Option{getter.WithBasicAuth("username", "password")} - - t.Run("should construct chart repository", func(t *testing.T) { - r, err := NewChartRepository(repositoryURL, providers, options) - if err != nil { - t.Error(err) - } - if got := r.URL; got != repositoryURL { - t.Fatalf("Expecting %q repository URL, got: %q", repositoryURL, got) - } - if r.Client == nil { - t.Fatalf("Expecting client, got 
nil") - } - if !reflect.DeepEqual(r.Options, options) { - t.Fatalf("Client options mismatth") - } - }) - - t.Run("should error on URL parsing failure", func(t *testing.T) { - _, err := NewChartRepository("https://ex ample.com", nil, nil) - switch err.(type) { - case *url.Error: - default: - t.Fatalf("Expecting URL error, got: %v", err) - } - }) - - t.Run("should error on unsupported scheme", func(t *testing.T) { - _, err := NewChartRepository("http://example.com", providers, nil) - if err == nil { - t.Fatalf("Expecting unsupported scheme error") - } - }) -} - -func TestChartRepository_Get(t *testing.T) { - i := repo.NewIndexFile() - i.Add(&chart.Metadata{Name: "chart", Version: "exact"}, "chart-exact.tgz", "http://example.com/charts", "sha256:1234567890") - i.Add(&chart.Metadata{Name: "chart", Version: "0.1.0"}, "chart-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Add(&chart.Metadata{Name: "chart", Version: "0.1.1"}, "chart-0.1.1.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Add(&chart.Metadata{Name: "chart", Version: "0.1.5+b.min.minute"}, "chart-0.1.5+b.min.minute.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Entries["chart"][len(i.Entries["chart"])-1].Created = time.Now().Add(-time.Minute) - i.Add(&chart.Metadata{Name: "chart", Version: "0.1.5+a.min.hour"}, "chart-0.1.5+a.min.hour.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Entries["chart"][len(i.Entries["chart"])-1].Created = time.Now().Add(-time.Hour) - i.Add(&chart.Metadata{Name: "chart", Version: "0.1.5+c.now"}, "chart-0.1.5+c.now.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Add(&chart.Metadata{Name: "chart", Version: "0.2.0"}, "chart-0.2.0.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Add(&chart.Metadata{Name: "chart", Version: "1.0.0"}, "chart-1.0.0.tgz", "http://example.com/charts", "sha256:1234567890abc") - i.Add(&chart.Metadata{Name: "chart", Version: "1.1.0-rc.1"}, "chart-1.1.0-rc.1.tgz", 
"http://example.com/charts", "sha256:1234567890abc") - i.SortEntries() - r := &ChartRepository{Index: i} - - tests := []struct { - name string - chartName string - chartVersion string - wantVersion string - wantErr bool - }{ - { - name: "exact matth", - chartName: "chart", - chartVersion: "exact", - wantVersion: "exact", - }, - { - name: "stable version", - chartName: "chart", - chartVersion: "", - wantVersion: "1.0.0", - }, - { - name: "stable version (asterisk)", - chartName: "chart", - chartVersion: "*", - wantVersion: "1.0.0", - }, - { - name: "semver range", - chartName: "chart", - chartVersion: "<1.0.0", - wantVersion: "0.2.0", - }, - { - name: "unfulfilled range", - chartName: "chart", - chartVersion: ">2.0.0", - wantErr: true, - }, - { - name: "invalid chart", - chartName: "non-existing", - wantErr: true, - }, - { - name: "match newest if ambiguous", - chartName: "chart", - chartVersion: "0.1.5", - wantVersion: "0.1.5+c.now", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cv, err := r.Get(tt.chartName, tt.chartVersion) - if (err != nil) != tt.wantErr { - t.Errorf("Get() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err == nil && !strings.Contains(cv.Metadata.Version, tt.wantVersion) { - t.Errorf("Get() unexpected version = %s, want = %s", cv.Metadata.Version, tt.wantVersion) - } - }) - } -} - -func TestChartRepository_DownloadChart(t *testing.T) { - tests := []struct { - name string - url string - chartVersion *repo.ChartVersion - wantURL string - wantErr bool - }{ - { - name: "relative URL", - url: "https://example.com", - chartVersion: &repo.ChartVersion{ - Metadata: &chart.Metadata{Name: "chart"}, - URLs: []string{"charts/foo-1.0.0.tgz"}, - }, - wantURL: "https://example.com/charts/foo-1.0.0.tgz", - }, - { - name: "no chart URL", - chartVersion: &repo.ChartVersion{Metadata: &chart.Metadata{Name: "chart"}}, - wantErr: true, - }, - { - name: "invalid chart URL", - chartVersion: &repo.ChartVersion{ - Metadata: 
&chart.Metadata{Name: "chart"}, - URLs: []string{"https://ex ample.com/charts/foo-1.0.0.tgz"}, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mg := mockGetter{} - r := &ChartRepository{ - URL: tt.url, - Client: &mg, - } - _, err := r.DownloadChart(tt.chartVersion) - if (err != nil) != tt.wantErr { - t.Errorf("DownloadChart() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err == nil && mg.requestedURL != tt.wantURL { - t.Errorf("DownloadChart() requested URL = %s, wantURL %s", mg.requestedURL, tt.wantURL) - } - }) - } -} - -func TestChartRepository_DownloadIndex(t *testing.T) { - b, err := ioutil.ReadFile(chartmuseumtestfile) - if err != nil { - t.Fatal(err) - } - mg := mockGetter{response: b} - r := &ChartRepository{ - URL: "https://example.com", - Client: &mg, - } - if err := r.DownloadIndex(); err != nil { - t.Fatal(err) - } - if expected := r.URL + "/index.yaml"; mg.requestedURL != expected { - t.Errorf("DownloadIndex() requested URL = %s, wantURL %s", mg.requestedURL, expected) - } - verifyLocalIndex(t, r.Index) -} - -// Index load tests are derived from https://github.com/helm/helm/blob/v3.3.4/pkg/repo/index_test.go#L108 -// to ensure parity with Helm behaviour. 
-func TestChartRepository_LoadIndex(t *testing.T) { - tests := []struct { - name string - filename string - }{ - { - name: "regular index file", - filename: testfile, - }, - { - name: "chartmuseum index file", - filename: chartmuseumtestfile, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - b, err := ioutil.ReadFile(tt.filename) - if err != nil { - t.Fatal(err) - } - r := &ChartRepository{} - err = r.LoadIndex(b) - if err != nil { - t.Fatal(err) - } - verifyLocalIndex(t, r.Index) - }) - } -} - -func TestChartRepository_LoadIndex_Duplicates(t *testing.T) { - r := &ChartRepository{} - if err := r.LoadIndex([]byte(indexWithDuplicates)); err == nil { - t.Errorf("Expected an error when duplicate entries are present") - } -} - -func TestChartRepository_LoadIndex_Unordered(t *testing.T) { - b, err := ioutil.ReadFile(unorderedtestfile) - if err != nil { - t.Fatal(err) - } - r := &ChartRepository{} - err = r.LoadIndex(b) - if err != nil { - t.Fatal(err) - } - verifyLocalIndex(t, r.Index) -} - -func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { - numEntries := len(i.Entries) - if numEntries != 3 { - t.Errorf("Expected 3 entries in index file but got %d", numEntries) - } - - alpine, ok := i.Entries["alpine"] - if !ok { - t.Fatalf("'alpine' section not found.") - } - - if l := len(alpine); l != 1 { - t.Fatalf("'alpine' should have 1 chart, got %d", l) - } - - nginx, ok := i.Entries["nginx"] - if !ok || len(nginx) != 2 { - t.Fatalf("Expected 2 nginx entries") - } - - expects := []*repo.ChartVersion{ - { - Metadata: &chart.Metadata{ - Name: "alpine", - Description: "string", - Version: "1.0.0", - Keywords: []string{"linux", "alpine", "small", "sumtin"}, - Home: "https://github.com/something", - }, - URLs: []string{ - "https://kubernetes-charts.storage.googleapis.com/alpine-1.0.0.tgz", - "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz", - }, - Digest: "sha256:1234567890abcdef", - }, - 
{ - Metadata: &chart.Metadata{ - Name: "nginx", - Description: "string", - Version: "0.2.0", - Keywords: []string{"popular", "web server", "proxy"}, - Home: "https://github.com/something/else", - }, - URLs: []string{ - "https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz", - }, - Digest: "sha256:1234567890abcdef", - }, - { - Metadata: &chart.Metadata{ - Name: "nginx", - Description: "string", - Version: "0.1.0", - Keywords: []string{"popular", "web server", "proxy"}, - Home: "https://github.com/something", - }, - URLs: []string{ - "https://kubernetes-charts.storage.googleapis.com/nginx-0.1.0.tgz", - }, - Digest: "sha256:1234567890abcdef", - }, - } - tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1]} - - for i, tt := range tests { - expect := expects[i] - if tt.Name != expect.Name { - t.Errorf("Expected name %q, got %q", expect.Name, tt.Name) - } - if tt.Description != expect.Description { - t.Errorf("Expected description %q, got %q", expect.Description, tt.Description) - } - if tt.Version != expect.Version { - t.Errorf("Expected version %q, got %q", expect.Version, tt.Version) - } - if tt.Digest != expect.Digest { - t.Errorf("Expected digest %q, got %q", expect.Digest, tt.Digest) - } - if tt.Home != expect.Home { - t.Errorf("Expected home %q, got %q", expect.Home, tt.Home) - } - - for i, url := range tt.URLs { - if url != expect.URLs[i] { - t.Errorf("Expected URL %q, got %q", expect.URLs[i], url) - } - } - for i, kw := range tt.Keywords { - if kw != expect.Keywords[i] { - t.Errorf("Expected keywords %q, got %q", expect.Keywords[i], kw) - } - } - } -} - -type mockGetter struct { - requestedURL string - response []byte -} - -func (g *mockGetter) Get(url string, options ...getter.Option) (*bytes.Buffer, error) { - g.requestedURL = url - return bytes.NewBuffer(g.response), nil -} diff --git a/internal/helm/testdata/chartmuseum-index.json b/internal/helm/testdata/chartmuseum-index.json new file mode 100644 index 
000000000..15ba3e704 --- /dev/null +++ b/internal/helm/testdata/chartmuseum-index.json @@ -0,0 +1,112 @@ +{ + "serverInfo": { + "contextPath": "/v1/helm" + }, + "apiVersion": "v1", + "generated": "0001-01-01T00:00:00Z", + "entries": { + "alpine": [ + { + "name": "alpine", + "home": "https://github.com/something", + "version": "1.0.0", + "description": "string", + "keywords": [ + "linux", + "alpine", + "small", + "sumtin" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/alpine-1.0.0.tgz", + "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + "chartWithNoURL": [ + { + "name": "chartWithNoURL", + "home": "https://github.com/something", + "version": "1.0.0", + "description": "string", + "keywords": [ + "small", + "sumtin" + ], + "apiVersion": "v1", + "urls": null, + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + "nginx": [ + { + "name": "nginx", + "home": "https://github.com/something/else", + "version": "0.2.0", + "description": "string", + "keywords": [ + "popular", + "web server", + "proxy" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.2.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + }, + { + "name": "nginx", + "home": "https://github.com/something", + "version": "0.1.0", + "description": "string", + "keywords": [ + "popular", + "web server", + "proxy" + ], + "apiVersion": "v1", + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-0.1.0.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ], + "xChartWithDuplicateDependenciesAndMissingAlias": [ + { + "name": "xChartWithDuplicateDependenciesAndMissingAlias", + "home": "https://example.com/something", + "version": "1.2.3", + 
"description": "string", + "keywords": [ + "broken", + "still accepted" + ], + "apiVersion": "v1", + "dependencies": [ + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + }, + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + } + ], + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } + ] + } +} diff --git a/internal/helm/testdata/chartmuseum-index.yaml b/internal/helm/testdata/chartmuseum-index.yaml index 3077596f4..ab00c1807 100644 --- a/internal/helm/testdata/chartmuseum-index.yaml +++ b/internal/helm/testdata/chartmuseum-index.yaml @@ -48,3 +48,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/charts/empty.tgz b/internal/helm/testdata/charts/empty.tgz new file mode 100644 index 000000000..872c01559 Binary files /dev/null and b/internal/helm/testdata/charts/empty.tgz differ diff --git a/internal/helm/testdata/charts/helmchart-0.1.0.tgz b/internal/helm/testdata/charts/helmchart-0.1.0.tgz index f64a32eee..1ffdde531 100644 Binary files a/internal/helm/testdata/charts/helmchart-0.1.0.tgz and b/internal/helm/testdata/charts/helmchart-0.1.0.tgz differ diff --git a/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz b/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz new file mode 100644 index 000000000..1f6675d5c Binary files /dev/null and b/internal/helm/testdata/charts/helmchart-badname-0.1.0.tgz differ diff 
--git a/internal/helm/testdata/charts/helmchart-v1/.helmignore b/internal/helm/testdata/charts/helmchart-v1/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchart-v1/Chart.yaml b/internal/helm/testdata/charts/helmchart-v1/Chart.yaml new file mode 100644 index 000000000..fed8cedf2 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A legacy Helm chart for Kubernetes +name: helmchart-v1 +version: 0.2.0 diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/NOTES.txt b/internal/helm/testdata/charts/helmchart-v1/templates/NOTES.txt new file mode 100644 index 000000000..c9a8aa76a --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchart-v1.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchart-v1.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchart-v1.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchart-v1.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchart-v1/templates/_helpers.tpl new file mode 100644 index 000000000..ecb988262 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchart-v1.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "helmchart-v1.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "helmchart-v1.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchart-v1.labels" -}} +app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} +helm.sh/chart: {{ include "helmchart-v1.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchart-v1.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchart-v1.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/deployment.yaml b/internal/helm/testdata/charts/helmchart-v1/templates/deployment.yaml new file mode 100644 index 000000000..8a435b3a1 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchart-v1.fullname" . }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "helmchart-v1.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/ingress.yaml b/internal/helm/testdata/charts/helmchart-v1/templates/ingress.yaml new file mode 100644 index 000000000..7db207166 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchart-v1.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/service.yaml b/internal/helm/testdata/charts/helmchart-v1/templates/service.yaml new file mode 100644 index 000000000..81a8cb688 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchart-v1.fullname" . }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchart-v1/templates/serviceaccount.yaml new file mode 100644 index 000000000..2f9b53dcb --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "helmchart-v1.serviceAccountName" . }} + labels: +{{ include "helmchart-v1.labels" . 
| indent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchart-v1/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchart-v1/templates/tests/test-connection.yaml new file mode 100644 index 000000000..da5b5c324 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchart-v1.fullname" . }}-test-connection" + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchart-v1.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchart-v1/values.yaml b/internal/helm/testdata/charts/helmchart-v1/values.yaml new file mode 100644 index 000000000..3c03b2cd9 --- /dev/null +++ b/internal/helm/testdata/charts/helmchart-v1/values.yaml @@ -0,0 +1,68 @@ +# Default values for helmchart-v1. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/charts/helmchart/values-prod.yaml b/internal/helm/testdata/charts/helmchart/values-prod.yaml new file mode 100644 index 000000000..5ef7832ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchart/values-prod.yaml @@ -0,0 +1 @@ +replicaCount: 2 diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1-0.3.0.tgz b/internal/helm/testdata/charts/helmchartwithdeps-v1-0.3.0.tgz new file mode 100644 index 000000000..5b648fcfc Binary files /dev/null and b/internal/helm/testdata/charts/helmchartwithdeps-v1-0.3.0.tgz differ diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/.helmignore b/internal/helm/testdata/charts/helmchartwithdeps-v1/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/Chart.yaml new file mode 100644 index 000000000..55508024f --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A legacy Helm chart for Kubernetes +name: helmchartwithdeps-v1 +version: 0.3.0 diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/requirements.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/requirements.yaml new file mode 100644 index 000000000..d6c815e6f --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: +- name: helmchart-v1 + version: "0.2.0" + repository: "file://../helmchart-v1" diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/NOTES.txt new file mode 100644 index 000000000..c9a8aa76a --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchart-v1.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchart-v1.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchart-v1.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchart-v1.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/_helpers.tpl new file mode 100644 index 000000000..ecb988262 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchart-v1.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "helmchart-v1.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "helmchart-v1.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchart-v1.labels" -}} +app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} +helm.sh/chart: {{ include "helmchart-v1.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchart-v1.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchart-v1.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/deployment.yaml new file mode 100644 index 000000000..8a435b3a1 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchart-v1.fullname" . }} + labels: +{{ include "helmchart-v1.labels" . 
| indent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "helmchart-v1.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/ingress.yaml new file mode 100644 index 000000000..7db207166 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchart-v1.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/service.yaml new file mode 100644 index 000000000..81a8cb688 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchart-v1.fullname" . }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "helmchart-v1.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/serviceaccount.yaml new file mode 100644 index 000000000..2f9b53dcb --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "helmchart-v1.serviceAccountName" . }} + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/tests/test-connection.yaml new file mode 100644 index 000000000..da5b5c324 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchart-v1.fullname" . }}-test-connection" + labels: +{{ include "helmchart-v1.labels" . | indent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchart-v1.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdeps-v1/values.yaml b/internal/helm/testdata/charts/helmchartwithdeps-v1/values.yaml new file mode 100644 index 000000000..3c03b2cd9 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps-v1/values.yaml @@ -0,0 +1,68 @@ +# Default values for helmchart-v1. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/charts/helmchartwithdeps/Chart.lock b/internal/helm/testdata/charts/helmchartwithdeps/Chart.lock new file mode 100644 index 000000000..83401ac65 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdeps/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.17.4 +digest: sha256:1e41c97e27347f433ff0212bf52c344bc82dd435f70129d15e96cd2c8fcc32bb +generated: "2021-11-02T01:25:59.624290788+01:00" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock new file mode 100644 index 000000000..83401ac65 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.17.4 +digest: sha256:1e41c97e27347f433ff0212bf52c344bc82dd435f70129d15e96cd2c8fcc32bb +generated: "2021-11-02T01:25:59.624290788+01:00" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml new file mode 100644 index 000000000..1e32b80ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +name: helmchartwithdeps +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 + +dependencies: + - name: helmchart + version: "0.1.0" + - name: helmchart + alias: aliased + version: "0.1.0" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml new file mode 100644 index 000000000..46eaf150b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: helmchart +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt new file mode 100644 index 000000000..741a77d8e --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchart.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchart.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchart.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl new file mode 100644 index 000000000..f6431fcb2 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "helmchart.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "helmchart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchart.labels" -}} +helm.sh/chart: {{ include "helmchart.chart" . }} +{{ include "helmchart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchart.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml new file mode 100644 index 000000000..daa9f8e56 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchart.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchart.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml new file mode 100644 index 000000000..c2069e9c8 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchart.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml new file mode 100644 index 000000000..12e16ef71 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchart.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml new file mode 100644 index 000000000..da3512648 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchart.serviceAccountName" . }} + labels: +{{ include "helmchart.labels" . 
| nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml new file mode 100644 index 000000000..11b0b1a96 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchart.fullname" . }}-test-connection" + labels: +{{ include "helmchart.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchart.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml new file mode 100644 index 000000000..5ef7832ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml @@ -0,0 +1 @@ +replicaCount: 2 diff --git a/controllers/testdata/charts/helmchart/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml similarity index 100% rename from controllers/testdata/charts/helmchart/values.yaml rename to internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt new file mode 100644 index 000000000..105423d28 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchartwithdeps.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchartwithdeps.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchartwithdeps.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchartwithdeps.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl new file mode 100644 index 000000000..a718f8b32 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchartwithdeps.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "helmchartwithdeps.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "helmchartwithdeps.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchartwithdeps.labels" -}} +helm.sh/chart: {{ include "helmchartwithdeps.chart" . }} +{{ include "helmchartwithdeps.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchartwithdeps.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchartwithdeps.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchartwithdeps.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchartwithdeps.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml new file mode 100644 index 000000000..08f62c740 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchartwithdeps.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml new file mode 100644 index 000000000..6c1b03148 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchartwithdeps.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml new file mode 100644 index 000000000..2c270c67b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml new file mode 100644 index 000000000..2eec29c55 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchartwithdeps.serviceAccountName" . }} + labels: +{{ include "helmchartwithdeps.labels" . | nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml new file mode 100644 index 000000000..bbcd09201 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchartwithdeps.fullname" . }}-test-connection" + labels: +{{ include "helmchartwithdeps.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchartwithdeps.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/controllers/testdata/charts/helmchartwithdeps/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml similarity index 100% rename from controllers/testdata/charts/helmchartwithdeps/values.yaml rename to internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml diff --git a/internal/helm/testdata/local-index-unordered.yaml b/internal/helm/testdata/local-index-unordered.yaml index 7482baaae..91ad62f1e 100644 --- a/internal/helm/testdata/local-index-unordered.yaml +++ b/internal/helm/testdata/local-index-unordered.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/local-index.yaml b/internal/helm/testdata/local-index.yaml index e680d2a3e..56c0ac2c3 100644 --- a/internal/helm/testdata/local-index.yaml +++ b/internal/helm/testdata/local-index.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: 
kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/index/digest.go b/internal/index/digest.go new file mode 100644 index 000000000..1f7bd642f --- /dev/null +++ b/internal/index/digest.go @@ -0,0 +1,221 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/opencontainers/go-digest" +) + +// Digester is a simple string key value index that can be used to calculate +// digests of the index. The digests are cached, and only recalculated if the +// index has changed. +type Digester struct { + // index is the map of keys and their associated values. + index map[string]string + + // digests is a cache of digests calculated for the index. + digests map[digest.Algorithm]digest.Digest + + mu sync.RWMutex +} + +// DigesterOption is a functional option for configuring a digester. +type DigesterOption func(*Digester) + +// WithIndex returns a DigesterOption that sets the index to the provided map. +// The map is copied, so any changes to the map after the option is applied +// will not be reflected in the index. 
+func WithIndex(i map[string]string) DigesterOption { + return func(d *Digester) { + if i != nil { + d.mu.Lock() + defer d.mu.Unlock() + + if d.index == nil { + d.index = make(map[string]string, len(i)) + } + for k, v := range i { + d.index[k] = v + } + d.reset() + } + } +} + +// NewDigester returns a new digest index with an empty initialized index. +func NewDigester(opts ...DigesterOption) *Digester { + d := &Digester{ + digests: make(map[digest.Algorithm]digest.Digest, 0), + index: make(map[string]string, 0), + } + for _, opt := range opts { + opt(d) + } + return d +} + +// Add adds the key and digest to the index. +func (i *Digester) Add(key, value string) { + i.mu.Lock() + defer i.mu.Unlock() + + i.index[key] = value + i.reset() +} + +// Delete removes the key from the index. +func (i *Digester) Delete(key string) { + i.mu.Lock() + defer i.mu.Unlock() + + if _, ok := i.index[key]; ok { + delete(i.index, key) + i.reset() + } +} + +// Get returns the digest for the key, or an empty digest if the key is not +// found. +func (i *Digester) Get(key string) string { + i.mu.RLock() + defer i.mu.RUnlock() + + return i.index[key] +} + +// Has returns true if the index contains the key. +func (i *Digester) Has(key string) bool { + i.mu.RLock() + defer i.mu.RUnlock() + + _, ok := i.index[key] + return ok +} + +// Index returns a copy of the index. +func (i *Digester) Index() map[string]string { + i.mu.RLock() + defer i.mu.RUnlock() + + index := make(map[string]string, len(i.index)) + for k, v := range i.index { + index[k] = v + } + return index +} + +// Len returns the number of keys in the index. +func (i *Digester) Len() int { + i.mu.RLock() + defer i.mu.RUnlock() + return len(i.index) +} + +// String returns a string representation of the index. The keys are stable +// sorted, and the string representation of the key/value pairs is written, +// each pair on a newline with a space between them. 
+func (i *Digester) String() string { + i.mu.RLock() + defer i.mu.RUnlock() + + keys := i.sortedKeys() + var b strings.Builder + for _, k := range keys { + b.Grow(len(k) + len(i.index[k]) + 2) + writeLine(&b, k, i.index[k]) + } + return b.String() +} + +// WriteTo writes the index to the writer. The keys are stable sorted, and the +// string representation of the key/value pairs is written, each pair on a +// newline with a space between them. +func (i *Digester) WriteTo(w io.Writer) (int64, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + keys := i.sortedKeys() + var n int64 + for _, k := range keys { + nn, err := writeLine(w, k, i.index[k]) + n += int64(nn) + if err != nil { + return n, err + } + } + return n, nil +} + +// Digest returns the digest of the index using the provided algorithm. +// If the index has not changed since the last call to Digest, the cached +// digest is returned. +// For verifying the index against a known digest, use Verify. +func (i *Digester) Digest(a digest.Algorithm) digest.Digest { + i.mu.Lock() + defer i.mu.Unlock() + + if _, ok := i.digests[a]; !ok { + digester := a.Digester() + keys := i.sortedKeys() + for _, k := range keys { + _, _ = writeLine(digester.Hash(), k, i.index[k]) + } + i.digests[a] = digester.Digest() + } + + return i.digests[a] +} + +// Verify returns true if the index matches the provided digest. +func (i *Digester) Verify(d digest.Digest) bool { + i.mu.RLock() + defer i.mu.RUnlock() + + verifier := d.Verifier() + keys := i.sortedKeys() + for _, k := range keys { + _, _ = writeLine(verifier, k, i.index[k]) + } + return verifier.Verified() +} + +// sortedKeys returns a slice of the keys in the index, sorted alphabetically. +func (i *Digester) sortedKeys() []string { + keys := make([]string, 0, len(i.index)) + for k := range i.index { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// reset clears the digests cache. 
+func (i *Digester) reset() { + i.digests = make(map[digest.Algorithm]digest.Digest, 0) +} + +// writeLine writes the key and digest to the writer, separated by a space and +// terminating with a newline. +func writeLine(w io.Writer, key, value string) (int, error) { + return fmt.Fprintf(w, "%s %s\n", key, value) +} diff --git a/internal/index/digest_test.go b/internal/index/digest_test.go new file mode 100644 index 000000000..531bb9329 --- /dev/null +++ b/internal/index/digest_test.go @@ -0,0 +1,381 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "bytes" + "errors" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/opencontainers/go-digest" +) + +func TestWithIndex(t *testing.T) { + t.Run("sets the index", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{"foo": "bar"} + d := &Digester{} + WithIndex(i)(d) + + g.Expect(d.index).To(Equal(i)) + }) + + t.Run("resets the digests", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{"foo": "bar"} + d := &Digester{ + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + WithIndex(i)(d) + + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("handles nil index", func(t *testing.T) { + g := NewWithT(t) + d := &Digester{} + WithIndex(nil)(d) + g.Expect(d.index).To(BeNil()) + }) +} + +func TestNewDigester(t *testing.T) { + t.Run("default", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + + g.Expect(d).ToNot(BeNil()) + g.Expect(d.index).ToNot(BeNil()) + g.Expect(d.digests).ToNot(BeNil()) + }) + + t.Run("with index", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{"foo": "bar"} + d := NewDigester(WithIndex(i)) + + g.Expect(d).ToNot(BeNil()) + g.Expect(d.index).To(Equal(i)) + g.Expect(d.digests).ToNot(BeNil()) + }) +} + +func TestDigester_Add(t *testing.T) { + t.Run("adds", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.index).To(HaveKeyWithValue("foo", "bar")) + }) + + t.Run("overwrites", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + d.Add("foo", "baz") + + g.Expect(d.index).To(HaveKeyWithValue("foo", "baz")) + }) + + t.Run("resets digests", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{}, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + d.Add("foo", "bar") + + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("adds empty key and value", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + 
d.Add("", "") + g.Expect(d.index).To(HaveKeyWithValue("", "")) + }) +} + +func TestDigester_Delete(t *testing.T) { + t.Run("deletes", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + d.Delete("foo") + + g.Expect(d.index).ToNot(HaveKey("foo")) + }) + + t.Run("resets digests", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{ + "foo": "bar", + }, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + + d.Delete("nop") + g.Expect(d.digests).To(HaveLen(1)) + + d.Delete("foo") + g.Expect(d.digests).To(BeEmpty()) + }) + + t.Run("deletes non-existent key without error", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Delete("non-existent") + g.Expect(d.index).To(BeEmpty()) + g.Expect(d.digests).To(BeEmpty()) + }) +} + +func TestDigester_Get(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.Get("foo")).To(Equal("bar")) + g.Expect(d.Get("bar")).To(BeEmpty()) +} + +func TestDigester_Has(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.Add("foo", "bar") + + g.Expect(d.Has("foo")).To(BeTrue()) + g.Expect(d.Has("bar")).To(BeFalse()) +} + +func TestDigester_Index(t *testing.T) { + t.Run("returns a copy of the index", func(t *testing.T) { + g := NewWithT(t) + + i := map[string]string{ + "foo": "bar", + "bar": "baz", + } + d := NewDigester(WithIndex(i)) + + iCopy := d.Index() + g.Expect(iCopy).To(Equal(i)) + g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + }) + + t.Run("returns an empty copy for an empty index", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + emptyIndex := d.Index() + g.Expect(emptyIndex).To(BeEmpty()) + }) +} + +func TestDigester_Len(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + g.Expect(d.Len()).To(Equal(2)) + + g.Expect(NewDigester().Len()).To(Equal(0)) +} + +func TestDigester_String(t 
*testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + g.Expect(d.String()).To(Equal(`bar baz +foo bar +`)) + + g.Expect(NewDigester().String()).To(Equal("")) +} + +func TestDigester_WriteTo(t *testing.T) { + t.Run("writes", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + expect := `bar baz +foo bar +` + + var buf bytes.Buffer + n, err := d.WriteTo(&buf) + + g.Expect(n).To(Equal(int64(len(expect)))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(buf.String()).To(Equal(expect)) + }) + + t.Run("errors", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + + w := &fakeWriter{ + err: errors.New("write error"), + written: 5, + } + n, err := d.WriteTo(w) + + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, w.err)).To(BeTrue()) + g.Expect(n).To(Equal(int64(w.written))) + }) +} + +func TestDigester_Digest(t *testing.T) { + t.Run("returns digest", func(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + "bar": "baz", + })) + expect := digest.SHA256.FromString(d.String()) + + g.Expect(d.Digest(digest.SHA256)).To(Equal(expect)) + g.Expect(d.digests).To(HaveKeyWithValue(digest.SHA256, expect)) + }) + + t.Run("returns cached digest", func(t *testing.T) { + g := NewWithT(t) + + d := &Digester{ + index: map[string]string{ + "foo": "bar", + "bar": "baz", + }, + digests: map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + }, + } + + g.Expect(d.Digest(digest.SHA256)).To(Equal(d.digests[digest.SHA256])) + }) +} + +func TestDigester_Verify(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "foo": "bar", + })) + + g.Expect(d.Verify(d.Digest(digest.SHA256))).To(BeTrue()) + g.Expect(d.Verify(digest.SHA256.FromString("different"))).To(BeFalse()) +} + 
+func TestDigester_sortedKeys(t *testing.T) { + g := NewWithT(t) + + d := NewDigester(WithIndex(map[string]string{ + "c/d/e": "bar", + "a/b/c": "baz", + "f/g/h": "foo", + })) + + g.Expect(d.sortedKeys()).To(Equal([]string{ + "a/b/c", + "c/d/e", + "f/g/h", + })) +} + +func TestDigester_reset(t *testing.T) { + g := NewWithT(t) + + d := NewDigester() + d.digests = map[digest.Algorithm]digest.Digest{ + digest.SHA256: "sha256:foo", + } + + d.reset() + g.Expect(d.digests).To(BeEmpty()) +} + +func Test_writeLine(t *testing.T) { + t.Run("writes", func(t *testing.T) { + g := NewWithT(t) + + var buf bytes.Buffer + n, err := writeLine(&buf, "foo", "bar") + + g.Expect(n).To(Equal(8)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(buf.String()).To(Equal(`foo bar +`)) + }) + + t.Run("errors", func(t *testing.T) { + g := NewWithT(t) + + w := &fakeWriter{ + err: errors.New("write error"), + written: 5, + } + n, err := writeLine(w, "foo", "bar") + + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, w.err)).To(BeTrue()) + g.Expect(n).To(Equal(w.written)) + }) +} + +type fakeWriter struct { + written int + err error +} + +func (f *fakeWriter) Write(p []byte) (n int, err error) { + return f.written, f.err +} diff --git a/internal/mock/gcs/server.go b/internal/mock/gcs/server.go new file mode 100644 index 000000000..d589a3cbc --- /dev/null +++ b/internal/mock/gcs/server.go @@ -0,0 +1,222 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcs + +import ( + "crypto/md5" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrObjectNotFound = errors.New("object not found") +) + +// Object is a mock Server object. +type Object struct { + Key string + Generation int64 + MetaGeneration int64 + ContentType string + Content []byte +} + +// Server is a simple Google Cloud Storage mock server. +// It serves the provided Objects for the BucketName on the HTTPAddress when +// Start or StartTLS is called. +// It provides primitive support "Generation Conditions" when Object contents +// are fetched. +// Ref: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Conditions +type Server struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*Object +} + +func NewServer(bucketName string) *Server { + s := &Server{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *Server) Start() { + s.srv.Start() +} + +func (s *Server) StartTLS(config *tls.Config) { + s.srv.TLS = config + s.srv.StartTLS() +} + +func (s *Server) Stop() { + s.srv.Close() +} + +func (s *Server) HTTPAddress() string { + return s.srv.URL +} + +func (s *Server) getAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range s.Objects { + objs.Items = append(objs.Items, getGCSObject(s.BucketName, *o)) + } + return objs +} + +func (s *Server) getObjectFile(key string, generation int64) ([]byte, error) { + for _, o := range s.Objects { + if o.Key == key { + if generation == 0 || generation == o.Generation { + return o.Content, nil + } + } + } + return nil, ErrObjectNotFound +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + uri := strings.TrimPrefix(r.RequestURI, "/storage/v1") + + switch { + // Handle Bucket metadata related 
queries + case strings.HasPrefix(uri, "/b/"): + switch { + // Return metadata about the Bucket + case uri == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", s.BucketName): + etag := md5.New() + for _, v := range s.Objects { + etag.Write(v.Content) + } + response := getGCSBucket(s.BucketName, fmt.Sprintf("%x", etag.Sum(nil))) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + // Return metadata about a Bucket object + case strings.Contains(uri, "/o/"): + var obj *Object + for _, o := range s.Objects { + // The object key in the URI is escaped. + // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if uri == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", s.BucketName, url.QueryEscape(o.Key)) { + obj = o + break + } + } + if obj != nil { + response := getGCSObject(s.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + // Return metadata about all objects in the Bucket + case strings.Contains(uri, "/o?"): + response := s.getAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + default: + w.WriteHeader(404) + return + } + // Handle object file query + default: + bucketPrefix := fmt.Sprintf("/%s/", s.BucketName) + if strings.HasPrefix(uri, bucketPrefix) { + // The URL path is of the format //included/file.txt. + // Extract the object key by discarding the bucket prefix. 
+ key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + + // Support "Generation Conditions" + // https://pkg.go.dev/cloud.google.com/go/storage#hdr-Conditions + var generation int64 + if matchGeneration := r.URL.Query().Get("ifGenerationMatch"); matchGeneration != "" { + var err error + if generation, err = strconv.ParseInt(matchGeneration, 10, 64); err != nil { + w.WriteHeader(500) + return + } + } + + // Handle returning object file in a bucket. + response, err := s.getObjectFile(key, generation) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCSObject(bucket string, obj Object) *raw.Object { + hash := md5.Sum(obj.Content) + etag := fmt.Sprintf("%x", hash) + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + Generation: obj.Generation, + Metageneration: obj.MetaGeneration, + Md5Hash: etag, + Etag: etag, + } +} + +func getGCSBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/internal/mock/s3/server.go b/internal/mock/s3/server.go new file mode 100644 index 000000000..904f19421 --- /dev/null +++ b/internal/mock/s3/server.go @@ -0,0 +1,157 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3 + +import ( + "crypto/md5" + "crypto/tls" + "fmt" + "net/http" + "net/http/httptest" + "path" + "path/filepath" + "strings" + "time" +) + +// Object is a mock Server object. +type Object struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +// Server is a simple AWS S3 mock server. +// It serves the provided Objects for the BucketName on the HTTPAddress when +// Start or StartTLS is called. +type Server struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*Object +} + +func NewServer(bucketName string) *Server { + s := &Server{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *Server) Start() { + s.srv.Start() +} + +func (s *Server) StartTLS(config *tls.Config) { + s.srv.TLS = config + s.srv.StartTLS() +} + +func (s *Server) Stop() { + s.srv.Close() +} + +func (s *Server) HTTPAddress() string { + return s.srv.URL +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + w.WriteHeader(200) + return + } + + if r.URL.Query().Has("location") { + w.WriteHeader(200) + w.Write([]byte(` + +Europe + `)) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + contents += fmt.Sprintf(` + + %s + %s + %d + "%x" + STANDARD + `, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` + + + %s + + + %d + 1000 + false + %s + + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *Object + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + 
return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%x\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + w.WriteHeader(200) + return + } + + w.WriteHeader(200) + w.Write(found.Content) + } +} diff --git a/internal/object/object.go b/internal/object/object.go new file mode 100644 index 000000000..37f8ef9fe --- /dev/null +++ b/internal/object/object.go @@ -0,0 +1,173 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "encoding/json" + "errors" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" +) + +var ( + ErrObservedGenerationNotFound = errors.New("observed generation not found") + ErrLastHandledReconcileAtNotFound = errors.New("last handled reconcile at not found") + ErrRequeueIntervalNotFound = errors.New("requeue interval not found") +) + +// toUnstructured converts a runtime object into Unstructured. +// Based on https://github.com/fluxcd/pkg/blob/b4a14854c75753ea9431693b39c4be672f246552/runtime/patch/utils.go#L55. 
+func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + // If the incoming object is already unstructured, perform a deep copy first + // otherwise DefaultUnstructuredConverter ends up returning the inner map without + // making a copy. + if _, ok := obj.(runtime.Unstructured); ok { + obj = obj.DeepCopyObject() + } + rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: rawMap}, nil +} + +// GetStatusLastHandledReconcileAt returns the status.lastHandledReconcileAt +// value of a given runtime object, if present. +func GetStatusLastHandledReconcileAt(obj runtime.Object) (string, error) { + u, err := toUnstructured(obj) + if err != nil { + return "", err + } + ra, found, err := unstructured.NestedString(u.Object, "status", "lastHandledReconcileAt") + if err != nil { + return "", err + } + if !found { + return "", ErrLastHandledReconcileAtNotFound + } + return ra, nil +} + +// SetStatusLastHandledReconcileAt sets the status.lastHandledReconcileAt value +// of a given runtime object. +func SetStatusLastHandledReconcileAt(obj runtime.Object, val string) error { + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + if err := unstructured.SetNestedField(u.Object, val, "status", "lastHandledReconcileAt"); err != nil { + return err + } + return runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) +} + +// GetStatusObservedGeneration returns the status.observedGeneration of a given +// runtime object. 
+func GetStatusObservedGeneration(obj runtime.Object) (int64, error) { + u, err := toUnstructured(obj) + if err != nil { + return 0, err + } + og, found, err := unstructured.NestedInt64(u.Object, "status", "observedGeneration") + if err != nil { + return 0, err + } + if !found { + return 0, ErrObservedGenerationNotFound + } + return og, nil +} + +// GetRequeueInterval returns the spec.interval of a given runtime object, if +// present. +func GetRequeueInterval(obj runtime.Object) (time.Duration, error) { + period := time.Second + u, err := toUnstructured(obj) + if err != nil { + return period, err + } + interval, found, err := unstructured.NestedString(u.Object, "spec", "interval") + if err != nil { + return period, err + } + if !found { + return period, ErrRequeueIntervalNotFound + } + return time.ParseDuration(interval) +} + +// GetSuspend returns the spec.suspend of a given runtime object. +func GetSuspend(obj runtime.Object) (bool, error) { + u, err := toUnstructured(obj) + if err != nil { + return false, err + } + suspend, found, err := unstructured.NestedBool(u.Object, "spec", "suspend") + if err != nil { + return false, err + } + // Since suspend is an optional field, it's false when not found. + if !found { + return false, nil + } + return suspend, nil +} + +// SetSuspend sets the spec.suspend value of a given runtime object. +func SetSuspend(obj runtime.Object, val bool) error { + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + if err := unstructured.SetNestedField(u.Object, val, "spec", "suspend"); err != nil { + return err + } + return runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) +} + +// GetArtifact returns the status.artifact of a given runtime object. 
+func GetArtifact(obj runtime.Object) (*meta.Artifact, error) { + u, err := toUnstructured(obj) + if err != nil { + return nil, err + } + artifact, found, err := unstructured.NestedFieldNoCopy(u.Object, "status", "artifact") + if err != nil { + return nil, err + } + // Since artifact is an optional field, return nil when not found. + if !found { + return nil, nil + } + enc, err := json.Marshal(artifact) + if err != nil { + return nil, err + } + outArtifact := &meta.Artifact{} + if err := json.Unmarshal(enc, outArtifact); err != nil { + return nil, err + } + return outArtifact, nil +} diff --git a/internal/object/object_test.go b/internal/object/object_test.go new file mode 100644 index 000000000..35cab3303 --- /dev/null +++ b/internal/object/object_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +func TestGetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + // Get unset status lastHandledReconcileAt. + obj := &sourcev1.GitRepository{} + _, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).To(Equal(ErrLastHandledReconcileAtNotFound)) + + // Get set status lastHandledReconcileAt. 
+ obj.Status.LastHandledReconcileAt = "foo" + ra, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ra).To(Equal("foo")) +} + +func TestSetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + err := SetStatusLastHandledReconcileAt(obj, "now") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) +} + +func TestGetStatusObservedGeneration(t *testing.T) { + g := NewWithT(t) + + // Get unset status observedGeneration. + obj := &sourcev1.GitRepository{} + _, err := GetStatusObservedGeneration(obj) + g.Expect(err).To(Equal(ErrObservedGenerationNotFound)) + + // Get set status observedGeneration. + obj.Status.ObservedGeneration = 7 + og, err := GetStatusObservedGeneration(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(og).To(Equal(int64(7))) +} + +func TestGetRequeueInterval(t *testing.T) { + g := NewWithT(t) + + // Get empty requeue interval value. + obj := &sourcev1.GitRepository{} + pd, err := GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(time.Duration(0))) + + // Get set requeue interval value. + obj.Spec.Interval = metav1.Duration{Duration: 3 * time.Second} + pd, err = GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(3 * time.Second)) + + // Get non-existent requeue interval value. + obj2 := &corev1.Secret{} + _, err = GetRequeueInterval(obj2) + g.Expect(err).To(Equal(ErrRequeueIntervalNotFound)) +} + +func TestGetSuspend(t *testing.T) { + g := NewWithT(t) + + // Get unset suspend value. + obj := &sourcev1.GitRepository{} + suspend, err := GetSuspend(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(suspend).To(BeFalse()) + + // Get set suspend value. 
+ obj.Spec.Suspend = true + suspend, err = GetSuspend(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(suspend).To(BeTrue()) +} + +func TestSetSuspend(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + err := SetSuspend(obj, true) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Spec.Suspend).To(BeTrue()) + + // Overwrite previous value. + err = SetSuspend(obj, false) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Spec.Suspend).To(BeFalse()) +} + +func TestGetArtifact(t *testing.T) { + g := NewWithT(t) + + // Get unset artifact value. + obj := &sourcev1.GitRepository{} + artifact, err := GetArtifact(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact).To(BeNil()) + + // Get set artifact value. + obj.Status.Artifact = &meta.Artifact{Path: "aaa", Revision: "zzz"} + artifact, err = GetArtifact(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact).ToNot(BeNil()) + g.Expect(artifact.Path).To(Equal("aaa")) + g.Expect(artifact.Revision).To(Equal("zzz")) +} diff --git a/internal/oci/auth.go b/internal/oci/auth.go new file mode 100644 index 000000000..6bd35c59e --- /dev/null +++ b/internal/oci/auth.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package oci + +import ( + "context" + "strings" + + "github.com/google/go-containerregistry/pkg/authn" + + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +// Anonymous is an authn.AuthConfig that always returns an anonymous +// authenticator. It is useful for registries that do not require authentication +// or when the credentials are not known. +// It implements authn.Keychain `Resolve` method and can be used as a keychain. +type Anonymous authn.AuthConfig + +// Resolve implements authn.Keychain. +func (a Anonymous) Resolve(_ authn.Resource) (authn.Authenticator, error) { + return authn.Anonymous, nil +} + +// OIDCAuth generates the OIDC credential authenticator based on the specified cloud provider. +func OIDCAuth(ctx context.Context, url, provider string, opts ...auth.Option) (authn.Authenticator, error) { + u := strings.TrimPrefix(url, sourcev1.OCIRepositoryPrefix) + return authutils.GetArtifactRegistryCredentials(ctx, provider, u, opts...) +} diff --git a/internal/oci/cosign/cosign.go b/internal/oci/cosign/cosign.go new file mode 100644 index 000000000..75af33091 --- /dev/null +++ b/internal/oci/cosign/cosign.go @@ -0,0 +1,162 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cosign + +import ( + "context" + "crypto" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" + "github.com/sigstore/cosign/v2/pkg/cosign" + ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + + soci "github.com/fluxcd/source-controller/internal/oci" +) + +// options is a struct that holds options for verifier. +type options struct { + publicKey []byte + rOpt []remote.Option + identities []cosign.Identity +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithPublicKey sets the public key. +func WithPublicKey(publicKey []byte) Options { + return func(opts *options) { + opts.publicKey = publicKey + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier. +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithIdentities specifies the identity matchers that have to be met +// for the signature to be deemed valid. +func WithIdentities(identities []cosign.Identity) Options { + return func(opts *options) { + opts.identities = identities + } +} + +// CosignVerifier is a struct which is responsible for executing verification logic. +type CosignVerifier struct { + opts *cosign.CheckOpts +} + +// NewCosignVerifier initializes a new CosignVerifier. 
+func NewCosignVerifier(ctx context.Context, opts ...Options) (*CosignVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + checkOpts := &cosign.CheckOpts{} + + ro := coptions.RegistryOptions{} + co, err := ro.ClientOpts(ctx) + if err != nil { + return nil, err + } + + checkOpts.Identities = o.identities + if o.rOpt != nil { + co = append(co, ociremote.WithRemoteOptions(o.rOpt...)) + } + + checkOpts.RegistryClientOpts = co + + // If a public key is provided, it will use it to verify the signature. + // If there is no public key provided, it will try keyless verification. + // https://github.com/sigstore/cosign/blob/main/KEYLESS.md. + if len(o.publicKey) > 0 { + checkOpts.Offline = true + // TODO(hidde): this is an oversight in our implementation. As it is + // theoretically possible to have a custom PK, without disabling tlog. + checkOpts.IgnoreTlog = true + + pubKeyRaw, err := cryptoutils.UnmarshalPEMToPublicKey(o.publicKey) + if err != nil { + return nil, err + } + + checkOpts.SigVerifier, err = signature.LoadVerifier(pubKeyRaw, crypto.SHA256) + if err != nil { + return nil, err + } + } else { + checkOpts.RekorClient, err = rekor.NewClient(coptions.DefaultRekorURL) + if err != nil { + return nil, fmt.Errorf("unable to create Rekor client: %w", err) + } + + // This performs an online fetch of the Rekor public keys, but this is needed + // for verifying tlog entries (both online and offline). + // TODO(hidde): above note is important to keep in mind when we implement + // "offline" tlog above. 
+ if checkOpts.RekorPubKeys, err = cosign.GetRekorPubs(ctx); err != nil { + return nil, fmt.Errorf("unable to get Rekor public keys: %w", err) + } + + checkOpts.CTLogPubKeys, err = cosign.GetCTLogPubs(ctx) + if err != nil { + return nil, fmt.Errorf("unable to get CTLog public keys: %w", err) + } + + if checkOpts.RootCerts, err = fulcio.GetRoots(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio root certs: %w", err) + } + + if checkOpts.IntermediateCerts, err = fulcio.GetIntermediates(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio intermediate certs: %w", err) + } + } + + return &CosignVerifier{ + opts: checkOpts, + }, nil +} + +// Verify verifies the authenticity of the given ref OCI image. +// It returns a boolean indicating if the verification was successful. +// It returns an error if the verification fails, nil otherwise. +func (v *CosignVerifier) Verify(ctx context.Context, ref name.Reference) (soci.VerificationResult, error) { + signatures, _, err := cosign.VerifyImageSignatures(ctx, ref, v.opts) + if err != nil { + return soci.VerificationResultFailed, err + } + + if len(signatures) == 0 { + return soci.VerificationResultFailed, nil + } + + return soci.VerificationResultSuccess, nil +} diff --git a/internal/oci/cosign/cosign_test.go b/internal/oci/cosign/cosign_test.go new file mode 100644 index 000000000..f99e7d1f6 --- /dev/null +++ b/internal/oci/cosign/cosign_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cosign + +import ( + "context" + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + . "github.com/onsi/gomega" + "github.com/sigstore/cosign/v2/pkg/cosign" + + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" +) + +func TestOptions(t *testing.T) { + tests := []struct { + name string + opts []Options + want *options + }{{ + name: "no options", + want: &options{}, + }, { + name: "signature option", + opts: []Options{WithPublicKey([]byte("foo"))}, + want: &options{ + publicKey: []byte("foo"), + rOpt: nil, + }, + }, { + name: "keychain option", + opts: []Options{WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain))}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + }, + }, { + name: "keychain and authenticator option", + opts: []Options{WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + )}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + }, + }, { + name: "keychain, authenticator and transport option", + opts: []Options{WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + )}, + want: &options{ + publicKey: nil, + rOpt: []remote.Option{ + 
remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + }, + }, + }, { + name: "identities option", + opts: []Options{WithIdentities([]cosign.Identity{ + { + SubjectRegExp: "test-user", + IssuerRegExp: "^https://token.actions.githubusercontent.com$", + }, + { + SubjectRegExp: "dev-user", + IssuerRegExp: "^https://accounts.google.com$", + }, + })}, + want: &options{ + identities: []cosign.Identity{ + { + SubjectRegExp: "test-user", + IssuerRegExp: "^https://token.actions.githubusercontent.com$", + }, + { + SubjectRegExp: "dev-user", + IssuerRegExp: "^https://accounts.google.com$", + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + o := options{} + for _, opt := range test.opts { + opt(&o) + } + if !reflect.DeepEqual(o.publicKey, test.want.publicKey) { + t.Errorf("got %#v, want %#v", &o.publicKey, test.want.publicKey) + } + + if test.want.rOpt != nil { + if len(o.rOpt) != len(test.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(test.want.rOpt)) + } + return + } + + if test.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) + } + } + }) + } +} + +func TestPrivateKeyVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := testregistry.New(t) + + tagURL := fmt.Sprintf("%s/fluxcd/source-controller:v1.3.0", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + keys, err := cosign.GenerateKeyPair(func(b bool) ([]byte, error) { + return []byte("cosign-password"), nil + }) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "image tag not found", + }, + { + name: "with 
incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithRemoteOptions(remote.WithTransport(transport))) + opts = append(opts, WithPublicKey(keys.PublicBytes)) + + verifier, err := NewCosignVerifier(ctx, opts...) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} diff --git a/internal/oci/notation/notation.go b/internal/oci/notation/notation.go new file mode 100644 index 000000000..0158ffd03 --- /dev/null +++ b/internal/oci/notation/notation.go @@ -0,0 +1,404 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package notation + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + _ "github.com/notaryproject/notation-core-go/signature/cose" + _ "github.com/notaryproject/notation-core-go/signature/jws" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + verifier "github.com/notaryproject/notation-go/verifier" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/notaryproject/notation-go/verifier/truststore" + oras "oras.land/oras-go/v2/registry/remote" + oauth "oras.land/oras-go/v2/registry/remote/auth" + retryhttp "oras.land/oras-go/v2/registry/remote/retry" + + "github.com/fluxcd/source-controller/internal/helm/common" + "github.com/fluxcd/source-controller/internal/oci" +) + +// name of the trustpolicy file defined in the Secret containing +// notation public keys. +const DefaultTrustPolicyKey = "trustpolicy.json" + +// options is a struct that holds options for verifier. +type options struct { + rootCertificates [][]byte + rOpt []remote.Option + trustPolicy *trustpolicy.Document + auth authn.Authenticator + keychain authn.Keychain + insecure bool + logger logr.Logger + transport *http.Transport +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithInsecureRegistry sets notation to verify against insecure registry. +func WithInsecureRegistry(insecure bool) Options { + return func(opts *options) { + opts.insecure = insecure + } +} + +// WithTrustPolicy sets the trust policy configuration. 
+func WithTrustPolicy(trustPolicy *trustpolicy.Document) Options { + return func(opts *options) { + opts.trustPolicy = trustPolicy + } +} + +// WithRootCertificates is a functional option for overriding the default +// rootCertificate options used by the verifier to set the root CA certificate for notary. +// It takes in a list of certificate data as an array of byte slices. +// The function returns a options function option that sets the public certificate +// in the notation options. +func WithRootCertificates(data [][]byte) Options { + return func(opts *options) { + opts.rootCertificates = data + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithAuth is a functional option for overriding the default +// authenticator options used by the verifier +func WithAuth(auth authn.Authenticator) Options { + return func(o *options) { + o.auth = auth + } +} + +// WithKeychain is a functional option for overriding the default +// keychain options used by the verifier +func WithKeychain(key authn.Keychain) Options { + return func(o *options) { + o.keychain = key + } +} + +// WithLogger is a function that returns an Options function to set the logger for the options. +// The logger is used for logging purposes within the options. +func WithLogger(logger logr.Logger) Options { + return func(o *options) { + o.logger = logger + } +} + +// WithTransport is a function that returns an Options function to set the transport for the options. 
+func WithTransport(transport *http.Transport) Options { + return func(o *options) { + o.transport = transport + } +} + +// NotationVerifier is a struct which is responsible for executing verification logic +type NotationVerifier struct { + auth authn.Authenticator + keychain authn.Keychain + verifier *notation.Verifier + opts []remote.Option + insecure bool + logger logr.Logger + transport *http.Transport +} + +var _ truststore.X509TrustStore = &trustStore{} + +// trustStore is used by notation-go/verifier to retrieve the root certificate for notary. +// The default behaviour is to read the certificate from disk and return it as a byte slice. +// The reason for implementing the interface here is to avoid reading the certificate from disk +// as the certificate is already available in memory. +type trustStore struct { + certs [][]byte +} + +// GetCertificates implements truststore.X509TrustStore. +func (s trustStore) GetCertificates(ctx context.Context, storeType truststore.Type, namedStore string) ([]*x509.Certificate, error) { + certs := []*x509.Certificate{} + for _, data := range s.certs { + raw := data + block, _ := pem.Decode(raw) + if block != nil { + raw = block.Bytes + } + + cert, err := x509.ParseCertificates(raw) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate '%s': %s", namedStore, err) + } + + certs = append(certs, cert...) 
+ } + + return certs, nil +} + +// NewNotationVerifier initializes a new Verifier +func NewNotationVerifier(opts ...Options) (*NotationVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + store := &trustStore{ + certs: o.rootCertificates, + } + + trustpolicy := o.trustPolicy + if trustpolicy == nil { + return nil, fmt.Errorf("trust policy cannot be empty") + } + + verifier, err := verifier.New(trustpolicy, store, nil) + if err != nil { + return nil, err + } + + return &NotationVerifier{ + auth: o.auth, + keychain: o.keychain, + verifier: &verifier, + opts: o.rOpt, + insecure: o.insecure, + logger: o.logger, + transport: o.transport, + }, nil +} + +// CleanTrustPolicy cleans the given trust policy by removing trust stores and trusted identities +// for trust policy statements that are set to skip signature verification but still have configured trust stores and/or trusted identities. +// It takes a pointer to a trustpolicy.Document and a logger from the logr package as input parameters. +// If the trustPolicy is nil, it returns nil. +// Otherwise, it iterates over the trustPolicy.TrustPolicies and checks if each trust policy statement's +// SignatureVerification.VerificationLevel is set to trustpolicy.LevelSkip.Name. +// If it is, it logs a warning message and removes the trust stores and trusted identities for that trust policy statement. +// Finally, it returns the modified trustPolicy. +func CleanTrustPolicy(trustPolicy *trustpolicy.Document, logger logr.Logger) *trustpolicy.Document { + if trustPolicy == nil { + return nil + } + + for i, j := range trustPolicy.TrustPolicies { + if j.SignatureVerification.VerificationLevel == trustpolicy.LevelSkip.Name { + if len(j.TrustStores) > 0 || len(j.TrustedIdentities) > 0 { + logger.Info(fmt.Sprintf("warning: trust policy statement '%s' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", j.Name)) + } + trustPolicy.TrustPolicies[i].TrustStores = []string{} + trustPolicy.TrustPolicies[i].TrustedIdentities = []string{} + } + } + + return trustPolicy +} + +// Verify verifies the authenticity of the given ref OCI image. +// It returns a boolean indicating if the verification was successful. +// It returns an error if the verification fails, nil otherwise. +func (v *NotationVerifier) Verify(ctx context.Context, ref name.Reference) (oci.VerificationResult, error) { + url := ref.Name() + + remoteRepo, err := v.remoteRepo(url) + if err != nil { + return oci.VerificationResultFailed, err + } + + repo := registry.NewRepository(remoteRepo) + + repoUrl, err := v.repoUrlWithDigest(url, ref) + if err != nil { + return oci.VerificationResultFailed, err + } + + verifyOptions := notation.VerifyOptions{ + ArtifactReference: repoUrl, + MaxSignatureAttempts: 3, + } + + _, outcomes, err := notation.Verify(ctx, *v.verifier, repo, verifyOptions) + if err != nil { + return oci.VerificationResultFailed, err + } + + return v.checkOutcome(outcomes, url) +} + +// checkOutcome checks the verification outcomes for a given URL and returns the corresponding OCI verification result. +// It takes a slice of verification outcomes and a URL as input parameters. +// If there are no verification outcomes, it returns a failed verification result with an error message. +// If the first verification outcome has a verification level of "trustpolicy.LevelSkip", it returns an ignored verification result. +// This function assumes that "trustpolicy.TypeIntegrity" is always enforced. It will return a successful validation result if "trustpolicy.TypeAuthenticity" is successful too. +// If any of the verification results have an error, it logs the error message and sets the "ignore" flag to true if the error type is "trustpolicy.TypeAuthenticity". +// If the "ignore" flag is true, it returns an ignored verification result. 
+// Otherwise, it returns a successful verification result. +// The function returns the OCI verification result and an error, if any. +func (v *NotationVerifier) checkOutcome(outcomes []*notation.VerificationOutcome, url string) (oci.VerificationResult, error) { + if len(outcomes) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("signature verification failed for all the signatures associated with %s", url) + } + + // should only ever be one item in the outcomes slice + outcome := outcomes[0] + + // if the verification level is set to skip, we ignore the verification result + // as there should be no verification results in outcome and we do not want + // to mark the result as verified + if outcome.VerificationLevel == trustpolicy.LevelSkip { + return oci.VerificationResultIgnored, nil + } + + ignore := false + + // loop through verification results to check for errors + for _, i := range outcome.VerificationResults { + // error if action is not marked as `skip` and there is an error + if i.Error != nil { + // flag to ignore the verification result if the error is related to type `authenticity` + if i.Type == trustpolicy.TypeAuthenticity { + ignore = true + } + // log results of error + v.logger.Info(fmt.Sprintf("verification check for type '%s' failed for '%s' with message: '%s'", i.Type, url, i.Error.Error())) + } + } + + // if the ignore flag is set, we ignore the verification result so not to mark as verified + if ignore { + return oci.VerificationResultIgnored, nil + } + + // result is okay to mark as verified + return oci.VerificationResultSuccess, nil +} + +// remoteRepo is a function that creates a remote repository object for the given repository URL. +// It initializes the repository with the provided URL and sets the PlainHTTP flag based on the value of the 'insecure' field in the Verifier struct. +// It also sets up the credential provider based on the authentication configuration provided in the Verifier struct. 
+// If authentication is required, it retrieves the authentication credentials and sets up the repository client with the appropriate headers and credentials. +// Finally, it returns the remote repository object and any error encountered during the process. +func (v *NotationVerifier) remoteRepo(repoUrl string) (*oras.Repository, error) { + remoteRepo, err := oras.NewRepository(repoUrl) + if err != nil { + return &oras.Repository{}, err + } + + remoteRepo.PlainHTTP = v.insecure + + credentialProvider := func(ctx context.Context, registry string) (oauth.Credential, error) { + return oauth.EmptyCredential, nil + } + + auth := authn.Anonymous + + if v.auth != nil { + auth = v.auth + } else if v.keychain != nil { + source := common.StringResource{Registry: repoUrl} + + auth, err = v.keychain.Resolve(source) + if err != nil { + return &oras.Repository{}, err + } + } + + if auth != authn.Anonymous { + authConfig, err := auth.Authorization() + if err != nil { + return &oras.Repository{}, err + } + + credentialProvider = func(ctx context.Context, registry string) (oauth.Credential, error) { + if authConfig.Username != "" || authConfig.Password != "" || authConfig.IdentityToken != "" || authConfig.RegistryToken != "" { + return oauth.Credential{ + Username: authConfig.Username, + Password: authConfig.Password, + RefreshToken: authConfig.IdentityToken, + AccessToken: authConfig.RegistryToken, + }, nil + } + return oauth.EmptyCredential, nil + } + } + + hc := retryhttp.DefaultClient + if v.transport != nil { + hc = &http.Client{ + Transport: retryhttp.NewTransport(v.transport), + } + } + repoClient := &oauth.Client{ + Client: hc, + Header: http.Header{ + "User-Agent": {"flux"}, + }, + Credential: credentialProvider, + } + + remoteRepo.Client = repoClient + + return remoteRepo, nil +} + +// repoUrlWithDigest takes a repository URL and a reference and returns the repository URL with the digest appended to it. 
+// If the repository URL does not contain a tag or digest, it returns an error. +func (v *NotationVerifier) repoUrlWithDigest(repoUrl string, ref name.Reference) (string, error) { + if !strings.Contains(repoUrl, "@") { + image, err := remote.Image(ref, v.opts...) + if err != nil { + return "", err + } + + digest, err := image.Digest() + if err != nil { + return "", err + } + + lastIndex := strings.LastIndex(repoUrl, ":") + if lastIndex == -1 { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + + firstPart := repoUrl[:lastIndex] + + if s := strings.Split(repoUrl, ":"); len(s) >= 2 { + repoUrl = fmt.Sprintf("%s@%s", firstPart, digest) + } else { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + } + return repoUrl, nil +} diff --git a/internal/oci/notation/notation_test.go b/internal/oci/notation/notation_test.go new file mode 100644 index 000000000..cdd8a3872 --- /dev/null +++ b/internal/oci/notation/notation_test.go @@ -0,0 +1,651 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package notation + +import ( + "context" + "fmt" + "net/http" + "net/url" + "path" + "reflect" + "testing" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + + "github.com/fluxcd/source-controller/internal/oci" + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" +) + +func TestOptions(t *testing.T) { + testCases := []struct { + name string + opts []Options + want *options + }{ + { + name: "no options", + want: &options{}, + }, + { + name: "signature option", + opts: []Options{WithRootCertificates([][]byte{[]byte("foo")})}, + want: &options{ + rootCertificates: [][]byte{[]byte("foo")}, + rOpt: nil, + }, + }, + { + name: "keychain option", + opts: []Options{ + WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain)), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain and authenticator option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + auth: &authn.Basic{Username: "foo", 
Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain, authenticator and transport option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + }, + auth: &authn.Basic{Username: "foo", Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "truststore, empty document", + opts: []Options{WithTrustPolicy(&trustpolicy.Document{})}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: &trustpolicy.Document{}, + }, + }, + { + name: "truststore, dummy document", + opts: []Options{WithTrustPolicy(dummyPolicyDocument())}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: dummyPolicyDocument(), + }, + }, + { + name: "insecure, false", + opts: []Options{WithInsecureRegistry(false)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + { + name: "insecure, true", + opts: []Options{WithInsecureRegistry(true)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: true, + }, + }, + { + name: "insecure, default", + opts: []Options{}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + o := options{} + for _, opt := range tc.opts { + opt(&o) + } + if !reflect.DeepEqual(o.rootCertificates, tc.want.rootCertificates) { + t.Errorf("got %#v, want %#v", 
&o.rootCertificates, tc.want.rootCertificates) + } + + if !reflect.DeepEqual(o.trustPolicy, tc.want.trustPolicy) { + t.Errorf("got %#v, want %#v", &o.trustPolicy, tc.want.trustPolicy) + } + + if tc.want.rOpt != nil { + if len(o.rOpt) != len(tc.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(tc.want.rOpt)) + } + return + } + + if tc.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) + } + } + }) + } +} + +func TestCleanTrustPolicy(t *testing.T) { + testCases := []struct { + name string + policy []trustpolicy.TrustPolicy + want *trustpolicy.Document + wantLogMessage string + }{ + { + name: "no trust policy", + want: nil, + }, + { + name: "trust policy verification level set to strict and should not be cleaned", + policy: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + }, + }, + { + name: "trust policy with multiple policies and should not be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + 
TrustedIdentities: nil, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }, + }, + }, + }, + { + name: "trust policy verification level skip should be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + { + name: "trust policy with multiple policies and mixture of verification levels including skip", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name-2' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + var policy *trustpolicy.Document + + if tc.policy != nil { + policy = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: tc.policy, + } + } + + cleanedPolicy := CleanTrustPolicy(policy, logger) + + if !reflect.DeepEqual(cleanedPolicy, tc.want) { + t.Errorf("got %#v, want %#v", cleanedPolicy, tc.want) + } + + if tc.wantLogMessage != "" { + g.Expect(len(l.Output)).Should(Equal(1)) + g.Expect(l.Output[0]).Should(Equal(tc.wantLogMessage)) + } + }) + } +} + +func TestOutcomeChecker(t *testing.T) { + testCases := []struct { + name string + outcome []*notation.VerificationOutcome + wantErrMessage string + wantLogMessage []string + wantVerificationResult oci.VerificationResult + }{ + { + name: "no outcome failed with error message", + wantVerificationResult: oci.VerificationResultFailed, + wantErrMessage: "signature verification failed for all the signatures associated with example.com/podInfo", + }, + { + name: "verification result ignored with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticity, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("123"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + wantLogMessage: []string{"verification check for type 'authenticity' failed for 'example.com/podInfo' with message: '123'"}, + }, + { + name: "verification result ignored with no log message (skip)", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelSkip, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + }, + { + name: 
"verification result success with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticTimestamp, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("456"), + }, + { + Type: trustpolicy.TypeExpiry, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("789"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + wantLogMessage: []string{ + "verification check for type 'authenticTimestamp' failed for 'example.com/podInfo' with message: '456'", + "verification check for type 'expiry' failed for 'example.com/podInfo' with message: '789'", + }, + }, + { + name: "verification result success with no log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + result, err := v.checkOutcome(tc.outcome, "example.com/podInfo") + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + } + + g.Expect(result).Should(Equal(tc.wantVerificationResult)) + g.Expect(len(l.Output)).Should(Equal(len(tc.wantLogMessage))) + + for i, j := range tc.wantLogMessage { + g.Expect(l.Output[i]).Should(Equal(j)) + } + }) + } +} + +func TestRepoUrlWithDigest(t *testing.T) { + testCases := []struct { + name string + repoUrl string + digest string + tag string + wantResultUrl string + wantErrMessage string + passUrlWithoutTag bool + }{ + { + name: "valid repo url with digest", + repoUrl: 
"ghcr.io/stefanprodan/charts/podinfo", + digest: "sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url with tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url without tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "url ghcr.io/stefanprodan/charts/podinfo does not contain tag or digest", + passUrlWithoutTag: true, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + var url string + repo, _ := name.NewRepository(tc.repoUrl) + var ref name.Reference + if tc.digest != "" { + ref = repo.Digest(tc.digest) + url = fmt.Sprintf("%s@%s", tc.repoUrl, tc.digest) + } else if tc.tag != "" { + ref = repo.Tag(tc.tag) + if !tc.passUrlWithoutTag { + url = fmt.Sprintf("%s:%s", tc.repoUrl, tc.tag) + } else { + url = tc.repoUrl + } + } else { + ref = repo.Tag(name.DefaultTag) + url = fmt.Sprintf("%s:%s", tc.repoUrl, name.DefaultTag) + } + + result, err := v.repoUrlWithDigest(url, ref) + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + g.Expect(result).Should(Equal(tc.wantResultUrl)) + } + }) + } +} + +func TestVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := 
testregistry.New(t) + + tarFilePath := path.Join("..", "..", "controller", "testdata", "podinfo", "podinfo-6.1.5.tar") + _, err := testregistry.CreatePodinfoImageFromTar(tarFilePath, "6.1.5", registryAddr) + g.Expect(err).NotTo(HaveOccurred()) + + tagURL := fmt.Sprintf("%s/podinfo:6.1.5", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "no signature is associated with", + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithTransport(transport)) + opts = append(opts, WithTrustPolicy(dummyPolicyDocument())) + opts = append(opts, WithInsecureRegistry(true)) + + verifier, err := NewNotationVerifier(opts...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} + +func dummyPolicyDocument() (policyDoc *trustpolicy.Document) { + policyDoc = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{dummyPolicyStatement()}, + } + return +} + +func dummyPolicyStatement() (policyStatement trustpolicy.TrustPolicy) { + policyStatement = trustpolicy.TrustPolicy{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"ca:valid-trust-store", "signingAuthority:valid-trust-store"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + } + return +} + +// mocking LogSink to capture log messages. Source: https://stackoverflow.com/a/71425740 +type testLogger struct { + Output []string + r logr.RuntimeInfo +} + +func (t *testLogger) doLog(msg string) { + t.Output = append(t.Output, msg) +} + +func (t *testLogger) Init(info logr.RuntimeInfo) { + t.r = info +} + +func (t *testLogger) Enabled(level int) bool { + return true +} + +func (t *testLogger) Info(level int, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) Error(err error, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { + return t +} + +func (t *testLogger) WithName(name string) logr.LogSink { + return t +} diff --git a/internal/oci/verifier.go b/internal/oci/verifier.go new file mode 100644 index 000000000..eeb301eb0 --- /dev/null +++ b/internal/oci/verifier.go @@ -0,0 +1,42 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package oci + +import ( + "context" + + "github.com/google/go-containerregistry/pkg/name" +) + +// VerificationResult represents the result of a verification process. +type VerificationResult string + +const ( + // VerificationResultSuccess indicates that the artifact has been verified. + VerificationResultSuccess VerificationResult = "verified" + // VerificationResultFailed indicates that the artifact could not be verified. + VerificationResultFailed VerificationResult = "unverified" + // VerificationResultIgnored indicates that the artifact has not been verified + // but is allowed to proceed. This is used primarily when notation is used + // as the verifier. + VerificationResultIgnored VerificationResult = "ignored" +) + +// Verifier is an interface for verifying the authenticity of an OCI image. +type Verifier interface { + Verify(ctx context.Context, ref name.Reference) (VerificationResult, error) +} diff --git a/internal/predicates/helmrepository_type_predicate.go b/internal/predicates/helmrepository_type_predicate.go new file mode 100644 index 000000000..714d77942 --- /dev/null +++ b/internal/predicates/helmrepository_type_predicate.go @@ -0,0 +1,86 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +// HelmRepositoryOCIMigrationPredicate implements predicate functions to allow +// events for HelmRepository OCI that need migration to static object. Non-OCI +// HelmRepositories are always allowed. +type HelmRepositoryOCIMigrationPredicate struct { + predicate.Funcs +} + +// Create allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Create(e event.CreateEvent) bool { + return HelmRepositoryOCIRequireMigration(e.Object) +} + +// Update allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Update(e event.UpdateEvent) bool { + return HelmRepositoryOCIRequireMigration(e.ObjectNew) +} + +// Delete allows events for objects that need migration to static object. +func (HelmRepositoryOCIMigrationPredicate) Delete(e event.DeleteEvent) bool { + return HelmRepositoryOCIRequireMigration(e.Object) +} + +// HelmRepositoryOCIRequireMigration returns if a given HelmRepository of type +// OCI requires migration to static object. For non-OCI HelmRepository, it +// returns true. 
+func HelmRepositoryOCIRequireMigration(o client.Object) bool { + if o == nil { + return false + } + + hr, ok := o.(*sourcev1.HelmRepository) + if !ok { + return false + } + + if hr.Spec.Type != sourcev1.HelmRepositoryTypeOCI { + // Always allow non-OCI HelmRepository. + return true + } + + if controllerutil.ContainsFinalizer(hr, sourcev1.SourceFinalizer) || !hasEmptyHelmRepositoryStatus(hr) { + return true + } + + return false +} + +// hasEmptyHelmRepositoryStatus checks if the status of a HelmRepository is +// empty. +func hasEmptyHelmRepositoryStatus(obj *sourcev1.HelmRepository) bool { + if obj.Status.ObservedGeneration == 0 && + obj.Status.Conditions == nil && + obj.Status.URL == "" && + obj.Status.Artifact == nil && + obj.Status.ReconcileRequestStatus.LastHandledReconcileAt == "" { + return true + } + return false +} diff --git a/internal/predicates/helmrepository_type_predicate_test.go b/internal/predicates/helmrepository_type_predicate_test.go new file mode 100644 index 000000000..e98728413 --- /dev/null +++ b/internal/predicates/helmrepository_type_predicate_test.go @@ -0,0 +1,258 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates + +import ( + "testing" + + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +func TestHelmRepositoryOCIMigrationPredicate_Create(t *testing.T) { + tests := []struct { + name string + beforeFunc func(o *sourcev1.HelmRepository) + want bool + }{ + { + name: "new oci helm repo no status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: false, + }, + { + name: "new oci helm repo with default observed gen status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status.ObservedGeneration = -1 + }, + want: true, + }, + { + name: "old oci helm repo with finalizer only", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Finalizers = []string{sourcev1.SourceFinalizer} + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: true, + }, + { + name: "old oci helm repo with status only", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 3, + } + conditions.MarkTrue(o, meta.ReadyCondition, "foo", "bar") + }, + want: true, + }, + { + name: "old oci helm repo with finalizer and status", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Finalizers = []string{sourcev1.SourceFinalizer} + o.Spec.Type = sourcev1.HelmRepositoryTypeOCI + o.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 3, + } + conditions.MarkTrue(o, meta.ReadyCondition, "foo", "bar") + }, + want: true, + }, + { + name: "new default helm repo", + beforeFunc: func(o *sourcev1.HelmRepository) { + o.Spec.Type = sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + o := 
&sourcev1.HelmRepository{} + if tt.beforeFunc != nil { + tt.beforeFunc(o) + } + e := event.CreateEvent{Object: o} + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Create(e)).To(Equal(tt.want)) + }) + } +} + +func TestHelmRepositoryOCIMigrationPredicate_Update(t *testing.T) { + tests := []struct { + name string + beforeFunc func(oldObj, newObj *sourcev1.HelmRepository) + want bool + }{ + { + name: "update oci repo", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + URL: "oci://foo/bar", + } + *newObj = *oldObj.DeepCopy() + newObj.Spec.URL = "oci://foo/baz" + }, + want: false, + }, + { + name: "migrate old oci repo with status only", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Generation = 2 + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + oldObj.Status = sourcev1.HelmRepositoryStatus{ + ObservedGeneration: 2, + } + conditions.MarkTrue(oldObj, meta.ReadyCondition, "foo", "bar") + + *newObj = *oldObj.DeepCopy() + newObj.Generation = 3 + }, + want: true, + }, + { + name: "migrate old oci repo with finalizer only", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Generation = 2 + oldObj.Finalizers = []string{sourcev1.SourceFinalizer} + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + + *newObj = *oldObj.DeepCopy() + newObj.Generation = 3 + }, + want: true, + }, + { + name: "type switch default to oci", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeDefault, + } + oldObj.Status = sourcev1.HelmRepositoryStatus{ + Artifact: &meta.Artifact{}, + URL: "http://some-address", + ObservedGeneration: 3, + } + + *newObj = *oldObj.DeepCopy() + newObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + }, + want: true, + }, + 
{ + name: "type switch oci to default", + beforeFunc: func(oldObj, newObj *sourcev1.HelmRepository) { + oldObj.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, + } + *newObj = *oldObj.DeepCopy() + newObj.Spec.Type = sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + oldObj := &sourcev1.HelmRepository{} + newObj := oldObj.DeepCopy() + if tt.beforeFunc != nil { + tt.beforeFunc(oldObj, newObj) + } + e := event.UpdateEvent{ + ObjectOld: oldObj, + ObjectNew: newObj, + } + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Update(e)).To(Equal(tt.want)) + }) + } +} + +func TestHelmRepositoryOCIMigrationPredicate_Delete(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository) + want bool + }{ + { + name: "oci with finalizer", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Finalizers = []string{sourcev1.SourceFinalizer} + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: true, + }, + { + name: "oci with status", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + obj.Status.ObservedGeneration = 4 + }, + want: true, + }, + { + name: "oci without finalizer or status", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI + }, + want: false, + }, + { + name: "default helm repo", + beforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Spec.Type = sourcev1.HelmRepositoryTypeDefault + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.HelmRepository{} + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + e := event.DeleteEvent{Object: obj} + p := HelmRepositoryOCIMigrationPredicate{} + g.Expect(p.Delete(e)).To(Equal(tt.want)) + }) + } +} diff --git a/internal/reconcile/reconcile.go 
b/internal/reconcile/reconcile.go
new file mode 100644
index 000000000..27c931168
--- /dev/null
+++ b/internal/reconcile/reconcile.go
@@ -0,0 +1,233 @@
/*
Copyright 2021 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconcile

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"
	"github.com/fluxcd/pkg/runtime/patch"

	serror "github.com/fluxcd/source-controller/internal/error"
)

// Result is a type for creating an abstraction for the controller-runtime
// reconcile Result to simplify the Result values.
type Result int

const (
	// ResultEmpty indicates a reconcile result which does not requeue. It is
	// also used when returning an error, since the error overshadows result.
	ResultEmpty Result = iota
	// ResultRequeue indicates a reconcile result which should immediately
	// requeue.
	ResultRequeue
	// ResultSuccess indicates a reconcile success result.
	// For a reconciler that requeues regularly at a fixed interval, runtime
	// result with a fixed RequeueAfter is success result.
	// For a reconciler that doesn't requeue on successful reconciliation,
	// an empty runtime result is success result.
	// It is usually returned at the end of a reconciler/sub-reconciler.
	ResultSuccess
)

// RuntimeResultBuilder defines an interface for runtime result builders. This
// can be implemented to build custom results based on the context of the
// reconciler.
type RuntimeResultBuilder interface {
	// BuildRuntimeResult analyzes the result and error to return a runtime
	// result.
	BuildRuntimeResult(rr Result, err error) ctrl.Result
	// IsSuccess returns if a given runtime result is success for a
	// RuntimeResultBuilder.
	IsSuccess(ctrl.Result) bool
}

// AlwaysRequeueResultBuilder implements a RuntimeResultBuilder for always
// requeuing reconcilers. A successful reconciliation result for such
// reconcilers contains a fixed RequeueAfter value.
type AlwaysRequeueResultBuilder struct {
	// RequeueAfter is the fixed period at which the reconciler requeues on
	// successful execution.
	RequeueAfter time.Duration
}

// BuildRuntimeResult converts a given Result and error into the
// return values of a controller's Reconcile function.
func (r AlwaysRequeueResultBuilder) BuildRuntimeResult(rr Result, err error) ctrl.Result {
	// Handle special errors that contribute to expressing the result.
	switch e := err.(type) {
	case *serror.Waiting:
		// Safeguard: If no RequeueAfter is set, use the default success
		// RequeueAfter value to ensure a requeue takes place after some time.
		if e.RequeueAfter == 0 {
			return ctrl.Result{RequeueAfter: r.RequeueAfter}
		}
		return ctrl.Result{RequeueAfter: e.RequeueAfter}
	case *serror.Generic:
		// no-op error, reconcile at success interval.
		if e.Ignore {
			return ctrl.Result{RequeueAfter: r.RequeueAfter}
		}
	}

	switch rr {
	case ResultRequeue:
		return ctrl.Result{Requeue: true}
	case ResultSuccess:
		return ctrl.Result{RequeueAfter: r.RequeueAfter}
	default:
		return ctrl.Result{}
	}
}

// IsSuccess returns true if the given Result has the same RequeueAfter value
// as of the AlwaysRequeueResultBuilder.
func (r AlwaysRequeueResultBuilder) IsSuccess(result ctrl.Result) bool {
	return result.RequeueAfter == r.RequeueAfter
}

// ComputeReconcileResult analyzes the reconcile results (result + error),
// updates the status conditions of the object with any corrections and returns
// object patch configuration, runtime result and runtime error. The caller is
// responsible for using the patch configuration while patching the object in
// the API server.
// The RuntimeResultBuilder is used to define how the ctrl.Result is computed.
func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb RuntimeResultBuilder) ([]patch.Option, ctrl.Result, error) {
	var pOpts []patch.Option

	// Compute the runtime result.
	var result ctrl.Result
	if rb != nil {
		result = rb.BuildRuntimeResult(res, recErr)
	}

	// Remove reconciling condition on successful reconciliation.
	if recErr == nil && res == ResultSuccess {
		conditions.Delete(obj, meta.ReconcilingCondition)
	}

	// Presence of reconciling means that the reconciliation didn't succeed.
	// Set the Reconciling reason to ProgressingWithRetry to indicate a failure
	// retry.
	if conditions.IsReconciling(obj) {
		reconciling := conditions.Get(obj, meta.ReconcilingCondition)
		reconciling.Reason = meta.ProgressingWithRetryReason
		conditions.Set(obj, reconciling)
	}

	// Analyze the reconcile error. Each error type decides whether the error
	// is surfaced to the runtime (forcing a retry with backoff) or swallowed.
	switch t := recErr.(type) {
	case *serror.Stalling:
		if res == ResultEmpty {
			conditions.MarkStalled(obj, t.Reason, "%s", t.Error())
			// The current generation has been reconciled successfully and it
			// has resulted in a stalled state. Return no error to stop further
			// requeuing.
			pOpts = addPatchOptionWithStatusObservedGeneration(obj, pOpts)
			return pOpts, result, nil
		}
		// NOTE: Non-empty result with stalling error indicates that the
		// returned result is incorrect.
	case *serror.Waiting:
		// The reconcile resulted in waiting error, remove stalled condition if
		// present.
		conditions.Delete(obj, meta.StalledCondition)
		// The reconciler needs to wait and retry. Return no error.
		return pOpts, result, nil
	case *serror.Generic:
		conditions.Delete(obj, meta.StalledCondition)
		// If ignore, it's a no-op error, return no error, remove reconciling
		// condition.
		if t.Ignore {
			// The current generation has been reconciled successfully with
			// no-op result. Update status observed generation.
			pOpts = addPatchOptionWithStatusObservedGeneration(obj, pOpts)
			conditions.Delete(obj, meta.ReconcilingCondition)
			return pOpts, result, nil
		}
	case nil:
		// The reconcile didn't result in any error, we are not in stalled
		// state. If a requeue is requested, the current generation has not been
		// reconciled successfully.
		if res != ResultRequeue {
			pOpts = addPatchOptionWithStatusObservedGeneration(obj, pOpts)
		}
		conditions.Delete(obj, meta.StalledCondition)
	default:
		// The reconcile resulted in some error, but we are not in stalled
		// state.
		conditions.Delete(obj, meta.StalledCondition)
	}

	return pOpts, result, recErr
}

// LowestRequeuingResult returns the ReconcileResult with the lowest requeue
// period.
// Weightage:
//
//	ResultRequeue - immediate requeue (lowest)
//	ResultSuccess - requeue at an interval
//	ResultEmpty - no requeue
func LowestRequeuingResult(i, j Result) Result {
	switch {
	case i == ResultEmpty:
		return j
	case j == ResultEmpty:
		return i
	case i == ResultRequeue:
		return i
	case j == ResultRequeue:
		return j
	default:
		return j
	}
}

// FailureRecovery finds out if a failure recovery occurred by checking the fail
// conditions in the old object and the new object.
+func FailureRecovery(oldObj, newObj conditions.Getter, failConditions []string) bool { + failuresBefore := 0 + for _, failCondition := range failConditions { + if conditions.Get(oldObj, failCondition) != nil { + failuresBefore++ + } + if conditions.Get(newObj, failCondition) != nil { + // Short-circuit, there is failure now, can't be a recovery. + return false + } + } + return failuresBefore > 0 +} + +// addPatchOptionWithStatusObservedGeneration adds patch option +// WithStatusObservedGeneration to the provided patch option slice only if there +// is any condition present on the object, and returns it. This is necessary to +// prevent setting status observed generation without any effectual observation. +// An object must have some condition in the status if it has been observed. +// TODO: Move this to fluxcd/pkg/runtime/patch package after it has proven its +// need. +func addPatchOptionWithStatusObservedGeneration(obj conditions.Setter, opts []patch.Option) []patch.Option { + if len(obj.GetConditions()) > 0 { + opts = append(opts, patch.WithStatusObservedGeneration{}) + } + return opts +} diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go new file mode 100644 index 000000000..e22f370b5 --- /dev/null +++ b/internal/reconcile/reconcile_test.go @@ -0,0 +1,449 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "fmt" + "testing" + "time" + + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" +) + +func TestLowestRequeuingResult(t *testing.T) { + tests := []struct { + name string + i Result + j Result + wantResult Result + }{ + {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue}, + {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess}, + {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue}, + {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue}, + {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue}, + {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult)) + }) + } +} + +// This test uses AlwaysRequeueResultBuilder as the RuntimeResultBuilder. 
func TestComputeReconcileResult(t *testing.T) {
	testSuccessInterval := time.Minute
	tests := []struct {
		name             string
		result           Result
		beforeFunc       func(obj conditions.Setter)
		recErr           error
		wantResult       ctrl.Result
		wantErr          bool
		assertConditions []metav1.Condition
		afterFunc        func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions)
	}{
		{
			name:   "successful result",
			result: ResultSuccess,
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
			},
			recErr:     nil,
			wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
			wantErr:    false,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "foo"),
			},
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
			},
		},
		{
			name:   "successful result, Reconciling=True, remove Reconciling",
			result: ResultSuccess,
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkReconciling(obj, "NewRevision", "new revision")
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
			},
			recErr:     nil,
			wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
			wantErr:    false,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "foo"),
			},
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
				t.Expect(conditions.IsUnknown(obj, meta.ReconcilingCondition)).To(BeTrue())
			},
		},
		{
			name:   "successful result, Stalled=True, remove Stalled",
			result: ResultSuccess,
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkStalled(obj, "SomeReason", "some message")
			},
			recErr:     nil,
			wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
			wantErr:    false,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
				t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue())
			},
		},
		{
			name:       "requeue result",
			result:     ResultRequeue,
			recErr:     nil,
			wantResult: ctrl.Result{Requeue: true},
			wantErr:    false,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse())
			},
		},
		{
			name:       "stalling error",
			result:     ResultEmpty,
			recErr:     &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"},
			wantResult: ctrl.Result{},
			wantErr:    false,
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"),
			},
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
			},
		},
		{
			name:       "waiting error",
			result:     ResultEmpty,
			recErr:     &serror.Waiting{Err: fmt.Errorf("some error"), Reason: "some reason"},
			wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
			wantErr:    false,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse())
			},
		},
		{
			name:   "generic error, Stalled=True, remove Stalled",
			result: ResultEmpty,
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkStalled(obj, "SomeReason", "some message")
			},
			recErr: &serror.Generic{
				Err: fmt.Errorf("some error"), Reason: "some reason",
			},
			wantResult: ctrl.Result{},
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue())
			},
			wantErr: true,
		},
		{
			name:   "generic ignore error, Reconciling=True, remove Reconciling",
			result: ResultEmpty,
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkReconciling(obj, "NewRevision", "new revision")
			},
			recErr: &serror.Generic{
				Err: fmt.Errorf("some error"), Reason: "some reason",
				Config: serror.Config{
					Ignore: true,
				},
			},
			wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
				t.Expect(conditions.IsUnknown(obj, meta.ReconcilingCondition)).To(BeTrue())
			},
			wantErr: false,
		},
		{
			name:       "random error",
			result:     ResultEmpty,
			recErr:     fmt.Errorf("some error"),
			wantResult: ctrl.Result{},
			wantErr:    true,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse())
			},
		},
		{
			name:       "random error, Stalled=True, remove Stalled",
			result:     ResultEmpty,
			recErr:     fmt.Errorf("some error"),
			wantResult: ctrl.Result{},
			wantErr:    true,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
				t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse())
				t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue())
			},
		},
		{
			name: "failed with Reconciling=True adds ProgressingWithRetry reason",
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkReconciling(obj, meta.ProgressingReason, "some msg")
			},
			result:     ResultEmpty,
			recErr:     fmt.Errorf("some error"),
			wantResult: ctrl.Result{},
			wantErr:    true,
			afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
			},
			assertConditions: []metav1.Condition{
				*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "some msg"),
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.GitRepository{}
			obj.Name = "test-git-repo"
			obj.Namespace = "default"
			obj.Spec.Interval = metav1.Duration{Duration: testSuccessInterval}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			rb := AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}
			pOpts, result, err := ComputeReconcileResult(obj, tt.result, tt.recErr, rb)
			g.Expect(err != nil).To(Equal(tt.wantErr))
			g.Expect(result).To(Equal(tt.wantResult))

			g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))

			// Apply the returned patch options to inspect the effective patch
			// configuration.
			opts := &patch.HelperOptions{}
			for _, o := range pOpts {
				o.ApplyToHelper(opts)
			}
			if tt.afterFunc != nil {
				tt.afterFunc(g, obj, opts)
			}
		})
	}
}

func TestAlwaysRequeueResultBuilder_IsSuccess(t *testing.T) {
	interval := 5 * time.Second

	tests := []struct {
		name          string
		resultBuilder AlwaysRequeueResultBuilder
		runtimeResult ctrl.Result
		result        bool
	}{
		{
			name:          "success result",
			resultBuilder: AlwaysRequeueResultBuilder{RequeueAfter: interval},
			runtimeResult: ctrl.Result{RequeueAfter: interval},
			result:        true,
		},
		{
			name:          "requeue result",
			resultBuilder: AlwaysRequeueResultBuilder{RequeueAfter: interval},
			runtimeResult: ctrl.Result{Requeue: true},
			result:        false,
		},
		{
			name:          "zero result",
			resultBuilder: AlwaysRequeueResultBuilder{RequeueAfter: interval},
			runtimeResult: ctrl.Result{},
			result:        false,
		},
		{
			name:          "different requeue after",
			resultBuilder: AlwaysRequeueResultBuilder{RequeueAfter: interval},
			runtimeResult: ctrl.Result{RequeueAfter: time.Second},
			result:        false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			g.Expect(tt.resultBuilder.IsSuccess(tt.runtimeResult)).To(Equal(tt.result))
		})
	}
}

func TestFailureRecovery(t *testing.T) {
	failCondns := []string{
		"FooFailed",
		"BarFailed",
		"BazFailed",
	}
	tests := []struct {
		name           string
		oldObjFunc     func(obj conditions.Setter)
		newObjFunc     func(obj conditions.Setter)
		failConditions []string
		result         bool
	}{
		{
			name: "no failures",
			oldObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			failConditions: failCondns,
			result:         false,
		},
		{
			name: "no recovery",
			oldObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "FooFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "FooFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			failConditions: failCondns,
			result:         false,
		},
		{
			name: "different failure",
			oldObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "FooFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "BarFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			failConditions: failCondns,
			result:         false,
		},
		{
			name: "failure recovery",
			oldObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "FooFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			failConditions: failCondns,
			result:         true,
		},
		{
			name: "ready to fail",
			oldObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			newObjFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, "BazFailed", "some-reason", "message")
				conditions.MarkFalse(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
			},
			failConditions: failCondns,
			result:         false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			oldObj := &sourcev1.GitRepository{}
			newObj := oldObj.DeepCopy()

			if tt.oldObjFunc != nil {
				tt.oldObjFunc(oldObj)
			}

			if tt.newObjFunc != nil {
				tt.newObjFunc(newObj)
			}

			g.Expect(FailureRecovery(oldObj, newObj, tt.failConditions)).To(Equal(tt.result))
		})
	}
}

func TestAddOptionWithStatusObservedGeneration(t *testing.T) {
	tests := []struct {
		name       string
		beforeFunc func(obj conditions.Setter)
		patchOpts  []patch.Option
		want       bool
	}{
		{
			name: "no conditions",
			want: false,
		},
		{
			name: "some condition",
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
			},
			want: true,
		},
		{
			name: "existing option with conditions",
			beforeFunc: func(obj conditions.Setter) {
				conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
			},
			patchOpts: []patch.Option{patch.WithForceOverwriteConditions{}, patch.WithStatusObservedGeneration{}},
			want:      true,
		},
		{
			name:      "existing option, no conditions, can't remove",
			patchOpts: []patch.Option{patch.WithForceOverwriteConditions{}, patch.WithStatusObservedGeneration{}},
			want:      true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			obj := &sourcev1.GitRepository{}

			if tt.beforeFunc != nil {
				tt.beforeFunc(obj)
			}

			tt.patchOpts = addPatchOptionWithStatusObservedGeneration(obj, tt.patchOpts)

			// Apply the options and evaluate the result.
			options := &patch.HelperOptions{}
			for _, opt := range tt.patchOpts {
				opt.ApplyToHelper(options)
			}
			g.Expect(options.IncludeStatusObservedGeneration).To(Equal(tt.want))
		})
	}
}
diff --git a/internal/reconcile/summarize/matchers_test.go b/internal/reconcile/summarize/matchers_test.go
new file mode 100644
index 000000000..b71aa99c8
--- /dev/null
+++ b/internal/reconcile/summarize/matchers_test.go
@@ -0,0 +1,99 @@
/*
Copyright 2022 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package summarize

import (
	"fmt"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/types"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/fluxcd/source-controller/internal/object"
)

// HaveStatusObservedGeneration returns a custom matcher to check if a
// runtime.Object has a given status observedGeneration value.
+func HaveStatusObservedGeneration(expected int64) types.GomegaMatcher { + return &haveStatusObservedGeneration{ + expected: expected, + } +} + +type haveStatusObservedGeneration struct { + expected int64 + actual int64 +} + +func (m *haveStatusObservedGeneration) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + og, err := object.GetStatusObservedGeneration(obj) + if err != nil && err != object.ErrObservedGenerationNotFound { + return false, err + } + m.actual = og + + return Equal(m.expected).Match(og) +} + +func (m *haveStatusObservedGeneration) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto match\n\t%d\n", m.actual, m.expected) +} + +func (m *haveStatusObservedGeneration) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto not match\n\t%d\n", m.actual, m.expected) +} + +// HaveStatusLastHandledReconcileAt returns a custom matcher to check if a +// runtime.Object has a given status lastHandledReconcileAt value. 
+func HaveStatusLastHandledReconcileAt(expected string) types.GomegaMatcher { + return &haveStatusLastHandledReconcileAt{ + expected: expected, + } +} + +type haveStatusLastHandledReconcileAt struct { + expected string + actual string +} + +func (m *haveStatusLastHandledReconcileAt) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + ra, err := object.GetStatusLastHandledReconcileAt(obj) + if err != nil && err != object.ErrLastHandledReconcileAtNotFound { + return false, err + } + m.actual = ra + + return Equal(m.expected).Match(ra) +} + +func (m *haveStatusLastHandledReconcileAt) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto match\n\t%s\n", m.actual, m.expected) +} + +func (m *haveStatusLastHandledReconcileAt) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto not match\n\t%s\n", m.actual, m.expected) +} diff --git a/internal/reconcile/summarize/processor.go b/internal/reconcile/summarize/processor.go new file mode 100644 index 000000000..746ca7c8e --- /dev/null +++ b/internal/reconcile/summarize/processor.go @@ -0,0 +1,103 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package summarize + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// ResultProcessor processes the results of reconciliation (the object, result +// and error). Any errors during processing need not result in the +// reconciliation failure. The errors can be recorded as logs and events. +type ResultProcessor func(context.Context, kuberecorder.EventRecorder, client.Object, reconcile.Result, error) + +// RecordReconcileReq is a ResultProcessor that checks the reconcile +// annotation value and sets it in the object status as +// status.lastHandledReconcileAt. +func RecordReconcileReq(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, _ error) { + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + object.SetStatusLastHandledReconcileAt(obj, v) + } +} + +// ErrorActionHandler is a ResultProcessor that handles all the actions +// configured in the given error. Logging and event recording are the handled +// actions at present. As more configurations are added to serror.Config, more +// action handlers can be added here. 
+func ErrorActionHandler(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, err error) { + switch e := err.(type) { + case *serror.Generic: + if e.Log { + logError(ctx, e.Config.Event, e, e.Error()) + } + recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason) + case *serror.Waiting: + if e.Log { + logError(ctx, e.Config.Event, e, "reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) + } + recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason) + case *serror.Stalling: + if e.Log { + logError(ctx, e.Config.Event, e, "reconciliation stalled") + } + recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason) + } +} + +// logError logs error based on the passed error configurations. +func logError(ctx context.Context, eventType string, err error, msg string, keysAndValues ...interface{}) { + switch eventType { + case corev1.EventTypeNormal, serror.EventTypeNone: + ctrl.LoggerFrom(ctx).Info(msg, keysAndValues...) + case corev1.EventTypeWarning: + ctrl.LoggerFrom(ctx).Error(err, msg, keysAndValues...) + } +} + +// recordEvent records events based on the passed error configurations. +func recordEvent(recorder kuberecorder.EventRecorder, obj client.Object, eventType string, notification bool, err error, reason string) { + if eventType == serror.EventTypeNone { + return + } + switch eventType { + case corev1.EventTypeNormal: + if notification { + // K8s native event and notification-controller event. + recorder.Eventf(obj, corev1.EventTypeNormal, reason, err.Error()) + } else { + // K8s native event only. + recorder.Eventf(obj, eventv1.EventTypeTrace, reason, err.Error()) + } + case corev1.EventTypeWarning: + // TODO: Due to the current implementation of the event recorder, all + // the K8s warning events are also sent as notification controller + // notifications. 
Once the recorder becomes capable of separating the + // two, conditionally record events. + recorder.Eventf(obj, corev1.EventTypeWarning, reason, err.Error()) + } +} diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go new file mode 100644 index 000000000..44f68b5bf --- /dev/null +++ b/internal/reconcile/summarize/processor_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +func TestRecordReconcileReq(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj client.Object) + afterFunc func(t *WithT, obj client.Object) + }{ + { + name: "no reconcile req", + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "no reconcile req, noop on existing value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "zzz") + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("zzz")) + }, + }, + { + name: "with reconcile req", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + }, + }, + { + name: "empty reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "whitespace-only reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: " ", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt(" ")) + }, + }, 
+ { + name: "reconcile annotation overwrites existing status value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "old-value") + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "new-value", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("new-value")) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-obj", + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + RecordReconcileReq(ctx, record.NewFakeRecorder(32), obj, reconcile.ResultEmpty, nil) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go new file mode 100644 index 000000000..8650a0907 --- /dev/null +++ b/internal/reconcile/summarize/summary.go @@ -0,0 +1,275 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package summarize + +import ( + "context" + "errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// Conditions contains all the conditions information needed to summarize the +// target condition. +type Conditions struct { + // Target is the target condition, e.g.: Ready. + Target string + // Owned conditions are the conditions owned by the reconciler for this + // target condition. + Owned []string + // Summarize conditions are the conditions that the target condition depends + // on. + Summarize []string + // NegativePolarity conditions are the conditions in Summarize with negative + // polarity. + NegativePolarity []string +} + +// Helper is SummarizeAndPatch helper. +type Helper struct { + recorder kuberecorder.EventRecorder + serialPatcher *patch.SerialPatcher +} + +// NewHelper returns an initialized Helper. +func NewHelper(recorder kuberecorder.EventRecorder, serialPatcher *patch.SerialPatcher) *Helper { + return &Helper{ + recorder: recorder, + serialPatcher: serialPatcher, + } +} + +// HelperOptions contains options for SummarizeAndPatch. +// Summarizing and patching at the very end of a reconciliation involves +// computing the result of the reconciler. This requires providing the +// ReconcileResult, ReconcileError and a ResultBuilder in the context of the +// reconciliation. +// For using this to perform intermediate patching in the middle of a +// reconciliation, no ReconcileResult, ReconcileError or ResultBuilder should +// be provided. Only Conditions summary would be calculated and patched. 
+type HelperOptions struct {
+	// Conditions are conditions that need to be summarized and persisted on
+	// the object.
+	Conditions []Conditions
+	// Processors are a chain of ResultProcessors for processing the results. This
+	// can be used to analyze and modify the results. This enables injecting
+	// custom middlewares in the SummarizeAndPatch operation.
+	Processors []ResultProcessor
+	// IgnoreNotFound can be used to ignore any resource not found error during
+	// patching.
+	IgnoreNotFound bool
+	// ReconcileResult is the abstracted result of reconciliation.
+	ReconcileResult reconcile.Result
+	// ReconcileError is the reconciliation error.
+	ReconcileError error
+	// ResultBuilder defines how the reconciliation result is computed.
+	ResultBuilder reconcile.RuntimeResultBuilder
+	// PatchFieldOwner defines the field owner configuration for the Kubernetes
+	// patch operation.
+	PatchFieldOwner string
+	// BiPolarityConditionTypes is a list of bipolar conditions in the order
+	// of priority.
+	BiPolarityConditionTypes []string
+}
+
+// Option is configuration that modifies SummarizeAndPatch.
+type Option func(*HelperOptions)
+
+// WithConditions sets the Conditions for which summary is calculated in
+// SummarizeAndPatch.
+func WithConditions(condns ...Conditions) Option {
+	return func(s *HelperOptions) {
+		s.Conditions = append(s.Conditions, condns...)
+	}
+}
+
+// WithProcessors can be used to inject middlewares in the SummarizeAndPatch
+// process, to be executed before the result calculation and patching.
+func WithProcessors(rps ...ResultProcessor) Option {
+	return func(s *HelperOptions) {
+		s.Processors = append(s.Processors, rps...)
+	}
+}
+
+// WithIgnoreNotFound skips any resource not found error during patching.
+func WithIgnoreNotFound() Option {
+	return func(s *HelperOptions) {
+		s.IgnoreNotFound = true
+	}
+}
+
+// WithResultBuilder sets the strategy for result computation in
+// SummarizeAndPatch.
+func WithResultBuilder(rb reconcile.RuntimeResultBuilder) Option {
+	return func(s *HelperOptions) {
+		s.ResultBuilder = rb
+	}
+}
+
+// WithReconcileResult sets the value of input result used to calculate the
+// results of reconciliation in SummarizeAndPatch.
+func WithReconcileResult(rr reconcile.Result) Option {
+	return func(s *HelperOptions) {
+		s.ReconcileResult = rr
+	}
+}
+
+// WithReconcileError sets the value of input error used to calculate the
+// results of reconciliation in SummarizeAndPatch.
+func WithReconcileError(re error) Option {
+	return func(s *HelperOptions) {
+		s.ReconcileError = re
+	}
+}
+
+// WithPatchFieldOwner sets the FieldOwner in the patch helper.
+func WithPatchFieldOwner(fieldOwner string) Option {
+	return func(s *HelperOptions) {
+		s.PatchFieldOwner = fieldOwner
+	}
+}
+
+// WithBiPolarityConditionTypes sets the BiPolarityConditionTypes used to
+// calculate the value of Ready condition in SummarizeAndPatch.
+func WithBiPolarityConditionTypes(types ...string) Option {
+	return func(s *HelperOptions) {
+		s.BiPolarityConditionTypes = types
+	}
+}
+
+// SummarizeAndPatch summarizes and patches the result to the target object.
+// When used at the very end of a reconciliation, the result builder must be
+// specified using the Option WithResultBuilder(). The returned result and error
+// can be returned as the return values of the reconciliation.
+// When used in the middle of a reconciliation, no result builder should be set
+// and the result can be ignored.
+func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, options ...Option) (ctrl.Result, error) {
+	// Calculate the options.
+	opts := &HelperOptions{}
+	for _, o := range options {
+		o(opts)
+	}
+	// Combine the owned conditions of all the conditions for the patcher.
+	ownedConditions := []string{}
+	for _, c := range opts.Conditions {
+		ownedConditions = append(ownedConditions, c.Owned...)
+ } + // Patch the object, prioritizing the conditions owned by the controller in + // case of any conflicts. + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: ownedConditions, + }, + } + if opts.PatchFieldOwner != "" { + patchOpts = append(patchOpts, patch.WithFieldOwner(opts.PatchFieldOwner)) + } + + // Process the results of reconciliation. + for _, processor := range opts.Processors { + processor(ctx, h.recorder, obj, opts.ReconcileResult, opts.ReconcileError) + } + + var result ctrl.Result + var recErr error + if opts.ResultBuilder != nil { + // Compute the reconcile results, obtain patch options and reconcile error. + var pOpts []patch.Option + pOpts, result, recErr = reconcile.ComputeReconcileResult(obj, opts.ReconcileResult, opts.ReconcileError, opts.ResultBuilder) + patchOpts = append(patchOpts, pOpts...) + } + + // Summarize conditions. This must be performed only after computing the + // reconcile result, since the object status is adjusted based on the + // reconcile result and error. + for _, c := range opts.Conditions { + conditions.SetSummary(obj, + c.Target, + conditions.WithConditions( + c.Summarize..., + ), + conditions.WithNegativePolarityConditions( + c.NegativePolarity..., + ), + ) + } + + // Check any BiPolarity conditions in the status that are False. Failing + // BiPolarity condition should be set as the Ready condition value to + // reflect the actual cause of the reconciliation failure. + // NOTE: This is applicable to Ready condition only because it is a special + // condition in kstatus that reflects the overall state of an object. + // IMPLEMENTATION NOTE: An implementation of this within the + // conditions.merge() exists in fluxcd/pkg repo branch `bipolarity` + // (https://github.com/fluxcd/pkg/commit/756b9e6d253a4fae93c05419b7019d0169454858). + // If that gets added to conditions.merge, the following can be removed. 
+ var failedBiPolarity []string + for _, c := range opts.BiPolarityConditionTypes { + if conditions.IsFalse(obj, c) { + failedBiPolarity = append(failedBiPolarity, c) + } + } + if len(failedBiPolarity) > 0 { + topFailedBiPolarity := conditions.Get(obj, failedBiPolarity[0]) + conditions.MarkFalse(obj, meta.ReadyCondition, topFailedBiPolarity.Reason, "%s", topFailedBiPolarity.Message) + } + + // If object is not stalled, result is success and runtime error is nil, + // ensure that Ready=True. Else, use the Ready failure message as the + // runtime error message. This ensures that the reconciliation would be + // retried as the object isn't ready. + // NOTE: This is applicable to Ready condition only because it is a special + // condition in kstatus that reflects the overall state of an object. + if isNonStalledSuccess(obj, opts.ResultBuilder, result, recErr) { + if !conditions.IsReady(obj) { + recErr = errors.New(conditions.GetMessage(obj, meta.ReadyCondition)) + } + } + + // Finally, patch the resource. + if err := h.serialPatcher.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if opts.IgnoreNotFound && !obj.GetDeletionTimestamp().IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return result, recErr +} + +// isNonStalledSuccess checks if the reconciliation was successful and has not +// resulted in stalled situation. +func isNonStalledSuccess(obj conditions.Setter, rb reconcile.RuntimeResultBuilder, result ctrl.Result, recErr error) bool { + if !conditions.IsStalled(obj) && recErr == nil { + // Without result builder, it can't be determined if the result is + // success. 
+ if rb != nil { + return rb.IsSuccess(result) + } + } + return false +} diff --git a/internal/reconcile/summarize/summary_test.go b/internal/reconcile/summarize/summary_test.go new file mode 100644 index 000000000..c4c16e4eb --- /dev/null +++ b/internal/reconcile/summarize/summary_test.go @@ -0,0 +1,552 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// This tests the scenario where SummarizeAndPatch is used at the very end of a +// reconciliation. 
+func TestSummarizeAndPatch(t *testing.T) {
+	testBipolarCondition1 := "FooChecked1"
+	testBipolarCondition2 := "FooChecked2"
+	var testReadyConditions = Conditions{
+		Target: meta.ReadyCondition,
+		Owned: []string{
+			sourcev1.FetchFailedCondition,
+			sourcev1.ArtifactOutdatedCondition,
+			sourcev1.SourceVerifiedCondition,
+			testBipolarCondition1,
+			testBipolarCondition2,
+			meta.ReadyCondition,
+			meta.ReconcilingCondition,
+			meta.StalledCondition,
+		},
+		Summarize: []string{
+			sourcev1.FetchFailedCondition,
+			sourcev1.ArtifactOutdatedCondition,
+			sourcev1.SourceVerifiedCondition,
+			testBipolarCondition1,
+			testBipolarCondition2,
+			meta.StalledCondition,
+			meta.ReconcilingCondition,
+		},
+		NegativePolarity: []string{
+			sourcev1.FetchFailedCondition,
+			sourcev1.ArtifactOutdatedCondition,
+			meta.StalledCondition,
+			meta.ReconcilingCondition,
+		},
+	}
+	var testBipolarConditions = []string{sourcev1.SourceVerifiedCondition, testBipolarCondition1, testBipolarCondition2}
+	var testFooConditions = Conditions{
+		Target: "Foo",
+		Owned: []string{
+			"Foo",
+			"AAA",
+			"BBB",
+		},
+		Summarize: []string{
+			"AAA",
+			"BBB",
+		},
+		NegativePolarity: []string{
+			"BBB",
+		},
+	}
+
+	tests := []struct {
+		name              string
+		generation        int64
+		beforeFunc        func(obj conditions.Setter)
+		result            reconcile.Result
+		reconcileErr      error
+		conditions        []Conditions
+		bipolarConditions []string
+		wantErr           bool
+		afterFunc         func(t *WithT, obj client.Object)
+		assertConditions  []metav1.Condition
+	}{
+		// Success/Fail indicates if a reconciliation succeeded or failed.
+		// The object generation is expected to match the observed generation in
+		// the object status if Ready=True or Stalled=True at the end.
+		// All the cases have some Ready condition set, even if a test case is
+		// unrelated to the conditions, because it's necessary for a valid
+		// status.
+ { + name: "Success, Ready=True", + generation: 4, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + result: reconcile.ResultSuccess, + conditions: []Conditions{testReadyConditions}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(4)) + }, + }, + { + name: "Success, removes reconciling for successful result", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index version") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(2)) + }, + }, + { + name: "Success, record reconciliation request", + beforeFunc: func(obj conditions.Setter) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + generation: 3, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + t.Expect(obj).To(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", + generation: 7, + beforeFunc: 
func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index revision") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: fmt.Errorf("failed to create dir"), + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new index revision"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(7)) + }, + }, + { + name: "Success, with subreconciler stalled error", + generation: 9, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(9)) + }, + }, + { + name: "Fail, no error but requeue requested", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + 
*conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Success, multiple target conditions summary", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + conditions.MarkTrue(obj, "AAA", "ZZZ", "zzz") // Positive polarity True. + }, + conditions: []Conditions{testReadyConditions, testFooConditions}, + result: reconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + *conditions.TrueCondition("Foo", "ZZZ", "zzz"), // True summary. + *conditions.TrueCondition("AAA", "ZZZ", "zzz"), + }, + }, + { + name: "Success, multiple target conditions, False non-Ready summary don't affect result", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + conditions.MarkTrue(obj, "AAA", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "BBB", "YYY", "yyy") // Negative polarity True. + }, + conditions: []Conditions{testReadyConditions, testFooConditions}, + result: reconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + *conditions.FalseCondition("Foo", "YYY", "yyy"), // False summary. 
+ *conditions.TrueCondition("BBB", "YYY", "yyy"), + *conditions.TrueCondition("AAA", "ZZZ", "zzz"), + }, + }, + { + name: "Fail, success result but Ready=False", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + }, + wantErr: true, + }, + { + name: "Fail, reconciling with bipolar condition False, Ready gets bipolar failure value", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "new index revision") + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "verify failed") + }, + result: reconcile.ResultEmpty, + reconcileErr: errors.New("failed to verify source"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "VerifyFailed", "verify failed"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerifyFailed", "verify failed"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new index revision"), + }, + }, + { + name: "Fail, bipolar condition True, negative polarity True, Ready gets negative polarity value", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, meta.ProgressingReason, "new obj gen") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Success", "verified") + }, + result: reconcile.ResultEmpty, + 
reconcileErr: errors.New("failed to create dir"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new digest"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingWithRetryReason, "new obj gen"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Success", "verified"), + }, + }, + { + name: "Fail, multiple bipolar conditions False, Ready gets the bipolar with high priority", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Success", "verified") + conditions.MarkFalse(obj, testBipolarCondition1, "AAA", "aaa") + conditions.MarkFalse(obj, testBipolarCondition2, "BBB", "bbb") + }, + result: reconcile.ResultEmpty, + reconcileErr: errors.New("some failure"), + conditions: []Conditions{testReadyConditions}, + bipolarConditions: testBipolarConditions, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "AAA", "aaa"), + *conditions.FalseCondition(testBipolarCondition1, "AAA", "aaa"), + *conditions.FalseCondition(testBipolarCondition2, "BBB", "bbb"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Success", "verified"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)).To(Succeed()) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + c := fakeclient.NewClientBuilder(). 
+ WithScheme(scheme). + WithStatusSubresource(&sourcev1.GitRepository{}). + Build() + + ctx := context.TODO() + g.Expect(c.Create(ctx, obj)).To(Succeed()) + + serialPatcher := patch.NewSerialPatcher(obj, c) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), serialPatcher) + summaryOpts := []Option{ + WithReconcileResult(tt.result), + WithReconcileError(tt.reconcileErr), + WithConditions(tt.conditions...), + WithIgnoreNotFound(), + WithProcessors(ErrorActionHandler, RecordReconcileReq), + WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}), + } + if tt.bipolarConditions != nil { + summaryOpts = append(summaryOpts, WithBiPolarityConditionTypes(tt.bipolarConditions...)) + } + + _, gotErr := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + g.Expect(gotErr != nil).To(Equal(tt.wantErr), "SummarizeAndPatch() wantErr = %v, gotErr = %v", tt.wantErr, gotErr) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + + // Check if the object status is valid as per kstatus. + condns := &conditionscheck.Conditions{NegativePolarity: testReadyConditions.NegativePolarity} + checker := conditionscheck.NewChecker(c, condns) + checker.WithT(g).CheckErr(ctx, obj) + }) + } +} + +// This tests the scenario where SummarizeAndPatch is used in the middle of +// reconciliation. 
+func TestSummarizeAndPatch_Intermediate(t *testing.T) { + interval := 5 * time.Second + + var testStageAConditions = Conditions{ + Target: "StageA", + Owned: []string{"StageA", "A1", "A2", "A3"}, + Summarize: []string{"A1", "A2", "A3"}, + NegativePolarity: []string{"A3"}, + } + var testStageBConditions = Conditions{ + Target: "StageB", + Owned: []string{"StageB", "B1", "B2"}, + Summarize: []string{"B1", "B2"}, + NegativePolarity: []string{"B1"}, + } + + tests := []struct { + name string + conditions []Conditions + beforeFunc func(obj conditions.Setter) + assertConditions []metav1.Condition + }{ + { + name: "single Conditions, True summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition("StageA", "ZZZ", "zzz"), // True summary. + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "single Conditions, False summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "A3", "OOO", "ooo") // Negative polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "OOO", "ooo"), // False summary. + *conditions.TrueCondition("A3", "OOO", "ooo"), + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "multiple Conditions, mixed results", + conditions: []Conditions{testStageAConditions, testStageBConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A3", "ZZZ", "zzz") // Negative polarity True. + conditions.MarkTrue(obj, "B2", "RRR", "rrr") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "ZZZ", "zzz"), // False summary. 
+ *conditions.TrueCondition("A3", "ZZZ", "zzz"), + *conditions.TrueCondition("StageB", "RRR", "rrr"), // True summary. + *conditions.TrueCondition("B2", "RRR", "rrr"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)) + + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(&sourcev1.GitRepository{}). + Build() + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "QQQ", "qqq"), + }, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(c.Create(ctx, obj)).To(Succeed()) + serialPatcher := patch.NewSerialPatcher(obj, c) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), serialPatcher) + summaryOpts := []Option{ + WithConditions(tt.conditions...), + WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}), + } + _, err := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) 
+ g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestIsNonStalledSuccess(t *testing.T) { + interval := 5 * time.Second + + tests := []struct { + name string + beforeFunc func(obj conditions.Setter) + rb reconcile.RuntimeResultBuilder + recResult ctrl.Result + recErr error + wantResult bool + }{ + { + name: "non stalled success", + rb: reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}, + recResult: ctrl.Result{RequeueAfter: interval}, + wantResult: true, + }, + { + name: "stalled success", + beforeFunc: func(obj conditions.Setter) { + conditions.MarkStalled(obj, "FooReason", "test-msg") + }, + rb: reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}, + recResult: ctrl.Result{RequeueAfter: interval}, + wantResult: false, + }, + { + name: "error result", + rb: reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}, + recResult: ctrl.Result{RequeueAfter: interval}, + recErr: errors.New("some-error"), + wantResult: false, + }, + { + name: "non success result", + rb: reconcile.AlwaysRequeueResultBuilder{RequeueAfter: interval}, + recResult: ctrl.Result{RequeueAfter: 2 * time.Second}, + wantResult: false, + }, + { + name: "no result builder", + recResult: ctrl.Result{RequeueAfter: interval}, + wantResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + g.Expect(isNonStalledSuccess(obj, tt.rb, tt.recResult, tt.recErr)).To(Equal(tt.wantResult)) + }) + } +} diff --git a/internal/util/temp.go b/internal/util/temp.go new file mode 100644 index 000000000..054b12801 --- /dev/null +++ b/internal/util/temp.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TempDirForObj creates a new temporary directory in the directory dir +// in the format of 'Kind-Namespace-Name-*', and returns the +// pathname of the new directory. +func TempDirForObj(dir string, obj client.Object) (string, error) { + return os.MkdirTemp(dir, pattern(obj)) +} + +// TempPathForObj creates a temporary file path in the format of +// '/Kind-Namespace-Name-'. +// If the given dir is empty, os.TempDir is used as a default. +func TempPathForObj(dir, suffix string, obj client.Object) string { + if dir == "" { + dir = os.TempDir() + } + randBytes := make([]byte, 16) + rand.Read(randBytes) + return filepath.Join(dir, pattern(obj)+hex.EncodeToString(randBytes)+suffix) +} + +func pattern(obj client.Object) (p string) { + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + return fmt.Sprintf("%s-%s-%s-", kind, obj.GetNamespace(), obj.GetName()) +} diff --git a/internal/util/temp_test.go b/internal/util/temp_test.go new file mode 100644 index 000000000..2f98079c6 --- /dev/null +++ b/internal/util/temp_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "os" + "path/filepath" + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestTempDirForObj(t *testing.T) { + g := NewWithT(t) + + got, err := TempDirForObj("", mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeADirectory()) + defer os.RemoveAll(got) + + got2, err := TempDirForObj(got, mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got2).To(BeADirectory()) + defer os.RemoveAll(got2) + g.Expect(got2).To(ContainSubstring(got)) +} + +func TestTempPathForObj(t *testing.T) { + tests := []struct { + name string + dir string + suffix string + want string + }{ + { + name: "default", + want: filepath.Join(os.TempDir(), "secret-default-foo-"), + }, + { + name: "with directory", + dir: "/foo", + want: "/foo/secret-default-foo-", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got := TempPathForObj(tt.dir, tt.suffix, mockObj()) + g.Expect(got[:len(got)-32]).To(Equal(tt.want)) + }) + } +} + +func Test_pattern(t *testing.T) { + g := NewWithT(t) + g.Expect(pattern(mockObj())).To(Equal("secret-default-foo-")) +} + +func mockObj() client.Object { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + } +} diff --git a/main.go b/main.go index cd2bb3164..cb019e6e4 100644 --- a/main.go +++ b/main.go @@ -18,32 +18,56 @@ package main import ( 
"fmt" - "net" - "net/http" "os" - "path/filepath" - "strings" "time" - "github.com/go-logr/logr" flag "github.com/spf13/pflag" "helm.sh/helm/v3/pkg/getter" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + artcfg "github.com/fluxcd/pkg/artifact/config" + artdigest "github.com/fluxcd/pkg/artifact/digest" + artsrv "github.com/fluxcd/pkg/artifact/server" + artstore "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + pkgcache "github.com/fluxcd/pkg/cache" + "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" + feathelper "github.com/fluxcd/pkg/runtime/features" + "github.com/fluxcd/pkg/runtime/jitter" + "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/pprof" + "github.com/fluxcd/pkg/runtime/probes" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - 
"github.com/fluxcd/source-controller/controllers" // +kubebuilder:scaffold:imports + + "github.com/fluxcd/source-controller/internal/cache" + "github.com/fluxcd/source-controller/internal/controller" + "github.com/fluxcd/source-controller/internal/features" + "github.com/fluxcd/source-controller/internal/helm" + "github.com/fluxcd/source-controller/internal/helm/registry" ) +const controllerName = "source-controller" + var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") @@ -52,210 +76,367 @@ var ( Schemes: []string{"http", "https"}, New: getter.NewHTTPGetter, }, + getter.Provider{ + Schemes: []string{"oci"}, + New: getter.NewOCIGetter, + }, } ) func init() { - _ = clientgoscheme.AddToScheme(scheme) + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - _ = sourcev1.AddToScheme(scheme) + utilruntime.Must(sourcev1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } func main() { + const ( + tokenCacheDefaultMaxSize = 100 + ) + var ( - metricsAddr string - eventsAddr string - enableLeaderElection bool - storagePath string - storageAddr string - storageAdvAddr string - concurrent int - watchAllNamespaces bool - clientOptions client.Options - logOptions logger.Options + metricsAddr string + eventsAddr string + healthAddr string + concurrent int + requeueDependency time.Duration + helmIndexLimit int64 + helmChartLimit int64 + helmChartFileLimit int64 + artifactOptions artcfg.Options + clientOptions client.Options + logOptions logger.Options + leaderElectionOptions leaderelection.Options + rateLimiterOptions helper.RateLimiterOptions + featureGates feathelper.FeatureGates + watchOptions helper.WatchOptions + intervalJitterOptions jitter.IntervalOptions + helmCacheMaxSize int + helmCacheTTL string + helmCachePurgeInterval string + tokenCacheOptions pkgcache.TokenFlags + defaultServiceAccount string ) flag.StringVar(&metricsAddr, "metrics-addr", 
envOrDefault("METRICS_ADDR", ":8080"), "The address the metric endpoint binds to.") flag.StringVar(&eventsAddr, "events-addr", envOrDefault("EVENTS_ADDR", ""), "The address of the events receiver.") - flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, - "Enable leader election for controller manager. "+ - "Enabling this will ensure there is only one active controller manager.") - flag.StringVar(&storagePath, "storage-path", envOrDefault("STORAGE_PATH", ""), - "The local storage path.") - flag.StringVar(&storageAddr, "storage-addr", envOrDefault("STORAGE_ADDR", ":9090"), - "The address the static file server binds to.") - flag.StringVar(&storageAdvAddr, "storage-adv-addr", envOrDefault("STORAGE_ADV_ADDR", ""), - "The advertised address of the static file server.") + flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") flag.IntVar(&concurrent, "concurrent", 2, "The number of concurrent reconciles per controller.") - flag.BoolVar(&watchAllNamespaces, "watch-all-namespaces", true, - "Watch for custom resources in all namespaces, if set to false it will only watch the runtime namespace.") - flag.Bool("log-json", false, "Set logging to JSON format.") - flag.CommandLine.MarkDeprecated("log-json", "Please use --log-encoding=json instead.") + flag.Int64Var(&helmIndexLimit, "helm-index-max-size", helm.MaxIndexSize, + "The max allowed size in bytes of a Helm repository index file.") + flag.Int64Var(&helmChartLimit, "helm-chart-max-size", helm.MaxChartSize, + "The max allowed size in bytes of a Helm chart file.") + flag.Int64Var(&helmChartFileLimit, "helm-chart-file-max-size", helm.MaxChartFileSize, + "The max allowed size in bytes of a file in a Helm chart.") + flag.DurationVar(&requeueDependency, "requeue-dependency", 30*time.Second, + "The interval at which failing dependencies are reevaluated.") + flag.IntVar(&helmCacheMaxSize, "helm-cache-max-size", 0, + "The maximum size of the cache in number of indexes.") + 
flag.StringVar(&helmCacheTTL, "helm-cache-ttl", "15m", + "The TTL of an index in the cache. Valid time units are ns, us (or µs), ms, s, m, h.") + flag.StringVar(&helmCachePurgeInterval, "helm-cache-purge-interval", "1m", + "The interval at which the cache is purged. Valid time units are ns, us (or µs), ms, s, m, h.") + flag.StringSliceVar(&git.KexAlgos, "ssh-kex-algos", []string{}, + "The list of key exchange algorithms to use for ssh connections, arranged from most preferred to the least.") + flag.StringSliceVar(&git.HostKeyAlgos, "ssh-hostkey-algos", []string{}, + "The list of hostkey algorithms to use for ssh connections, arranged from most preferred to the least.") + flag.StringVar(&defaultServiceAccount, auth.ControllerFlagDefaultServiceAccount, + "", "Default service account to use for workload identity when not specified in resources.") + + artifactOptions.BindFlags(flag.CommandLine) clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) + leaderElectionOptions.BindFlags(flag.CommandLine) + rateLimiterOptions.BindFlags(flag.CommandLine) + featureGates.BindFlags(flag.CommandLine) + watchOptions.BindFlags(flag.CommandLine) + intervalJitterOptions.BindFlags(flag.CommandLine) + tokenCacheOptions.BindFlags(flag.CommandLine, tokenCacheDefaultMaxSize) + flag.Parse() - ctrl.SetLogger(logger.NewLogger(logOptions)) + logger.SetLogger(logger.NewLogger(logOptions)) - var eventRecorder *events.Recorder - if eventsAddr != "" { - if er, err := events.NewRecorder(eventsAddr, "source-controller"); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } else { - eventRecorder = er - } + if defaultServiceAccount != "" { + auth.SetDefaultServiceAccount(defaultServiceAccount) } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) 
+ if err := featureGates.WithLogger(setupLog).SupportedFeatures(features.FeatureGates()); err != nil { + setupLog.Error(err, "unable to load feature gates") + os.Exit(1) + } - watchNamespace := "" - if !watchAllNamespaces { - watchNamespace = os.Getenv("RUNTIME_NAMESPACE") + switch enabled, err := features.Enabled(auth.FeatureGateObjectLevelWorkloadIdentity); { + case err != nil: + setupLog.Error(err, "unable to check feature gate "+auth.FeatureGateObjectLevelWorkloadIdentity) + os.Exit(1) + case enabled: + auth.EnableObjectLevelWorkloadIdentity() } - restConfig := client.GetConfigOrDie(clientOptions) - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: 9443, - LeaderElection: enableLeaderElection, - LeaderElectionID: "305740c0.fluxcd.io", - Namespace: watchNamespace, - Logger: ctrl.Log, - }) + if auth.InconsistentObjectLevelConfiguration() { + setupLog.Error(auth.ErrInconsistentObjectLevelConfiguration, "invalid configuration") + os.Exit(1) + } + + if err := intervalJitterOptions.SetGlobalJitter(nil); err != nil { + setupLog.Error(err, "unable to set global jitter") + os.Exit(1) + } + + mgr := mustSetupManager(metricsAddr, healthAddr, concurrent, watchOptions, clientOptions, leaderElectionOptions) + + probes.SetupChecks(mgr, setupLog) + + metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) + cacheRecorder := cache.MustMakeMetrics() + eventRecorder := mustSetupEventRecorder(mgr, eventsAddr, controllerName) + + algo, err := artdigest.AlgorithmForName(artifactOptions.ArtifactDigestAlgo) if err != nil { - setupLog.Error(err, "unable to start manager") + setupLog.Error(err, "unable to configure canonical digest algorithm") os.Exit(1) } + artdigest.Canonical = algo - if storageAdvAddr == "" { - storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) + storage, err := artstore.New(&artifactOptions) + if err != nil { + setupLog.Error(err, "unable to configure 
artifact storage") + os.Exit(1) } - storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) - go startFileServer(storage.BasePath, storageAddr, setupLog) - - if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor("source-controller"), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, - }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, + + mustSetupHelmLimits(helmIndexLimit, helmChartLimit, helmChartFileLimit) + helmIndexCache, helmIndexCacheItemTTL := mustInitHelmCache(helmCacheMaxSize, helmCacheTTL, helmCachePurgeInterval) + + var tokenCache *pkgcache.TokenCache + if tokenCacheOptions.MaxSize > 0 { + var err error + tokenCache, err = pkgcache.NewTokenCache(tokenCacheOptions.MaxSize, + pkgcache.WithMaxDuration(tokenCacheOptions.MaxDuration), + pkgcache.WithMetricsRegisterer(ctrlmetrics.Registry), + pkgcache.WithMetricsPrefix("gotk_token_")) + if err != nil { + setupLog.Error(err, "unable to create token cache") + os.Exit(1) + } + } + + ctx := ctrl.SetupSignalHandler() + + if err := (&controller.GitRepositoryReconciler{ + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metrics, + Storage: storage, + ControllerName: controllerName, + TokenCache: tokenCache, + }).SetupWithManagerAndOptions(mgr, controller.GitRepositoryReconcilerOptions{ + DependencyRequeueInterval: requeueDependency, + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { setupLog.Error(err, "unable to create controller", "controller", sourcev1.GitRepositoryKind) os.Exit(1) } - if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor("source-controller"), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, 
- }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ - MaxConcurrentReconciles: concurrent, + + if err := (&controller.HelmRepositoryReconciler{ + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metrics, + Storage: storage, + Getters: getters, + ControllerName: controllerName, + Cache: helmIndexCache, + TTL: helmIndexCacheItemTTL, + CacheRecorder: cacheRecorder, + }).SetupWithManagerAndOptions(mgr, controller.HelmRepositoryReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind) os.Exit(1) } - if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor("source-controller"), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, - }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ - MaxConcurrentReconciles: concurrent, + + if err := (&controller.HelmChartReconciler{ + Client: mgr.GetClient(), + RegistryClientGenerator: registry.ClientGenerator, + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + Metrics: metrics, + ControllerName: controllerName, + Cache: helmIndexCache, + TTL: helmIndexCacheItemTTL, + CacheRecorder: cacheRecorder, + }).SetupWithManagerAndOptions(ctx, mgr, controller.HelmChartReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind) os.Exit(1) } - if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor("source-controller"), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, - }).SetupWithManagerAndOptions(mgr, 
controllers.BucketReconcilerOptions{ - MaxConcurrentReconciles: concurrent, + + if err := (&controller.BucketReconciler{ + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metrics, + Storage: storage, + ControllerName: controllerName, + TokenCache: tokenCache, + }).SetupWithManagerAndOptions(mgr, controller.BucketReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bucket") + setupLog.Error(err, "unable to create controller", "controller", sourcev1.BucketKind) + os.Exit(1) + } + + if err := (&controller.OCIRepositoryReconciler{ + Client: mgr.GetClient(), + Storage: storage, + EventRecorder: eventRecorder, + ControllerName: controllerName, + TokenCache: tokenCache, + Metrics: metrics, + }).SetupWithManagerAndOptions(mgr, controller.OCIRepositoryReconcilerOptions{ + RateLimiter: helper.GetRateLimiter(rateLimiterOptions), + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", sourcev1.OCIRepositoryKind) os.Exit(1) } // +kubebuilder:scaffold:builder + go func() { + // Block until our controller manager is elected leader. We presume our + // entire process will terminate if we lose leadership, so we don't need + // to handle that. + <-mgr.Elected() + + // Start the artifact server if running as leader. 
+ if err := artsrv.Start(ctx, &artifactOptions); err != nil { + setupLog.Error(err, "artifact server error") + os.Exit(1) + } + }() + setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } } -func startFileServer(path string, address string, l logr.Logger) { - fs := http.FileServer(http.Dir(path)) - http.Handle("/", fs) - err := http.ListenAndServe(address, nil) +func mustSetupEventRecorder(mgr ctrl.Manager, eventsAddr, controllerName string) record.EventRecorder { + eventRecorder, err := events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName) if err != nil { - l.Error(err, "file server error") + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) } + return eventRecorder } -func mustInitStorage(path string, storageAdvAddr string, l logr.Logger) *controllers.Storage { - if path == "" { - p, _ := os.Getwd() - path = filepath.Join(p, "bin") - os.MkdirAll(path, 0777) +func mustSetupManager(metricsAddr, healthAddr string, maxConcurrent int, + watchOpts helper.WatchOptions, clientOpts client.Options, leaderOpts leaderelection.Options) ctrl.Manager { + + watchNamespace := "" + if !watchOpts.AllNamespaces { + watchNamespace = os.Getenv("RUNTIME_NAMESPACE") + } + + watchSelector, err := helper.GetWatchSelector(watchOpts) + if err != nil { + setupLog.Error(err, "unable to configure watch label selector for manager") + os.Exit(1) + } + + var disableCacheFor []ctrlclient.Object + shouldCache, err := features.Enabled(features.CacheSecretsAndConfigMaps) + if err != nil { + setupLog.Error(err, "unable to check feature gate "+features.CacheSecretsAndConfigMaps) + os.Exit(1) + } + if !shouldCache { + disableCacheFor = append(disableCacheFor, &corev1.Secret{}, &corev1.ConfigMap{}) + } + + leaderElectionId := fmt.Sprintf("%s-%s", controllerName, "leader-election") + if watchOpts.LabelSelector != "" { + leaderElectionId = 
leaderelection.GenerateID(leaderElectionId, watchOpts.LabelSelector) + } + + restConfig := client.GetConfigOrDie(clientOpts) + mgrConfig := ctrl.Options{ + Scheme: scheme, + HealthProbeBindAddress: healthAddr, + LeaderElection: leaderOpts.Enable, + LeaderElectionReleaseOnCancel: leaderOpts.ReleaseOnCancel, + LeaseDuration: &leaderOpts.LeaseDuration, + RenewDeadline: &leaderOpts.RenewDeadline, + RetryPeriod: &leaderOpts.RetryPeriod, + LeaderElectionID: leaderElectionId, + Logger: ctrl.Log, + Client: ctrlclient.Options{ + Cache: &ctrlclient.CacheOptions{ + DisableFor: disableCacheFor, + }, + }, + Cache: ctrlcache.Options{ + ByObject: map[ctrlclient.Object]ctrlcache.ByObject{ + &sourcev1.GitRepository{}: {Label: watchSelector}, + &sourcev1.HelmRepository{}: {Label: watchSelector}, + &sourcev1.HelmChart{}: {Label: watchSelector}, + &sourcev1.Bucket{}: {Label: watchSelector}, + &sourcev1.OCIRepository{}: {Label: watchSelector}, + }, + }, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + ExtraHandlers: pprof.GetHandlers(), + }, + Controller: ctrlcfg.Controller{ + RecoverPanic: ptr.To(true), + MaxConcurrentReconciles: maxConcurrent, + }, } - storage, err := controllers.NewStorage(path, storageAdvAddr, 5*time.Minute) + if watchNamespace != "" { + mgrConfig.Cache.DefaultNamespaces = map[string]ctrlcache.Config{ + watchNamespace: ctrlcache.Config{}, + } + } + + mgr, err := ctrl.NewManager(restConfig, mgrConfig) if err != nil { - l.Error(err, "unable to initialise storage") + setupLog.Error(err, "unable to start manager") os.Exit(1) } + return mgr +} - return storage +func mustSetupHelmLimits(indexLimit, chartLimit, chartFileLimit int64) { + helm.MaxIndexSize = indexLimit + helm.MaxChartSize = chartLimit + helm.MaxChartFileSize = chartFileLimit } -func determineAdvStorageAddr(storageAddr string, l logr.Logger) string { - // TODO(hidde): remove next MINOR prerelease as it can be passed in using - // Kubernetes' substitution. 
- if os.Getenv("RUNTIME_NAMESPACE") != "" { - svcParts := strings.Split(os.Getenv("HOSTNAME"), "-") - return fmt.Sprintf("%s.%s", - strings.Join(svcParts[:len(svcParts)-2], "-"), os.Getenv("RUNTIME_NAMESPACE")) +func mustInitHelmCache(maxSize int, itemTTL, purgeInterval string) (*cache.Cache, time.Duration) { + if maxSize <= 0 { + setupLog.Info("caching of Helm index files is disabled") + return nil, -1 } - host, port, err := net.SplitHostPort(storageAddr) + interval, err := time.ParseDuration(purgeInterval) if err != nil { - l.Error(err, "unable to parse storage address") + setupLog.Error(err, "unable to parse Helm index cache purge interval") os.Exit(1) } - switch host { - case "": - host = "localhost" - case "0.0.0.0": - host = os.Getenv("HOSTNAME") - if host == "" { - hn, err := os.Hostname() - if err != nil { - l.Error(err, "0.0.0.0 specified in storage addr but hostname is invalid") - os.Exit(1) - } - host = hn - } + + ttl, err := time.ParseDuration(itemTTL) + if err != nil { + setupLog.Error(err, "unable to parse Helm index cache item TTL") + os.Exit(1) } - return net.JoinHostPort(host, port) + + return cache.New(maxSize, interval), ttl } func envOrDefault(envName, defaultValue string) string { diff --git a/pkg/git/common/common.go b/pkg/git/common/common.go deleted file mode 100644 index 3b949fbb3..000000000 --- a/pkg/git/common/common.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package common - -import ( - "context" - - "github.com/go-git/go-git/v5/plumbing/transport" - git2go "github.com/libgit2/git2go/v31" - corev1 "k8s.io/api/core/v1" -) - -const ( - DefaultOrigin = "origin" - DefaultBranch = "master" - DefaultPublicKeyAuthUser = "git" -) - -type Commit interface { - Verify(secret corev1.Secret) error - Hash() string -} - -type CheckoutStrategy interface { - Checkout(ctx context.Context, path, url string, auth *Auth) (Commit, string, error) -} - -type Auth struct { - AuthMethod transport.AuthMethod - CredCallback git2go.CredentialsCallback - CertCallback git2go.CertificateCheckCallback -} - -type AuthSecretStrategy interface { - Method(secret corev1.Secret) (*Auth, error) -} diff --git a/pkg/git/git.go b/pkg/git/git.go deleted file mode 100644 index 623a6ccab..000000000 --- a/pkg/git/git.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package git - -import ( - "fmt" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/pkg/git/common" - gitv1 "github.com/fluxcd/source-controller/pkg/git/v1" - gitv2 "github.com/fluxcd/source-controller/pkg/git/v2" -) - -const ( - defaultBranch = "master" -) - -func CheckoutStrategyForRef(ref *sourcev1.GitRepositoryRef, gitImplementation string) (common.CheckoutStrategy, error) { - switch gitImplementation { - case sourcev1.GoGitImplementation: - return gitv1.CheckoutStrategyForRef(ref), nil - case sourcev1.LibGit2Implementation: - return gitv2.CheckoutStrategyForRef(ref), nil - default: - return nil, fmt.Errorf("invalid git implementation %s", gitImplementation) - } -} - -func AuthSecretStrategyForURL(url string, gitImplementation string) (common.AuthSecretStrategy, error) { - switch gitImplementation { - case sourcev1.GoGitImplementation: - return gitv1.AuthSecretStrategyForURL(url) - case sourcev1.LibGit2Implementation: - return gitv2.AuthSecretStrategyForURL(url) - default: - return nil, fmt.Errorf("invalid git implementation %s", gitImplementation) - } -} diff --git a/pkg/git/v1/checkout.go b/pkg/git/v1/checkout.go deleted file mode 100644 index 64e952b08..000000000 --- a/pkg/git/v1/checkout.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "context" - "fmt" - "sort" - "time" - - "github.com/Masterminds/semver/v3" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - - "github.com/fluxcd/pkg/version" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/pkg/git/common" -) - -func CheckoutStrategyForRef(ref *sourcev1.GitRepositoryRef) common.CheckoutStrategy { - switch { - case ref == nil: - return &CheckoutBranch{branch: common.DefaultBranch} - case ref.SemVer != "": - return &CheckoutSemVer{semVer: ref.SemVer} - case ref.Tag != "": - return &CheckoutTag{tag: ref.Tag} - case ref.Commit != "": - strategy := &CheckoutCommit{branch: ref.Branch, commit: ref.Commit} - if strategy.branch == "" { - strategy.branch = common.DefaultBranch - } - return strategy - case ref.Branch != "": - return &CheckoutBranch{branch: ref.Branch} - default: - return &CheckoutBranch{branch: common.DefaultBranch} - } -} - -type CheckoutBranch struct { - branch string -} - -func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git.PlainCloneContext(ctx, path, false, &git.CloneOptions{ - URL: url, - Auth: auth.AuthMethod, - RemoteName: common.DefaultOrigin, - ReferenceName: plumbing.NewBranchReferenceName(c.branch), - SingleBranch: true, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: 0, - Progress: nil, - Tags: git.NoTags, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - commit, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Hash(), err) - } - return &Commit{commit}, fmt.Sprintf("%s/%s", c.branch, head.Hash().String()), 
nil -} - -type CheckoutTag struct { - tag string -} - -func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git.PlainCloneContext(ctx, path, false, &git.CloneOptions{ - URL: url, - Auth: auth.AuthMethod, - RemoteName: common.DefaultOrigin, - ReferenceName: plumbing.NewTagReferenceName(c.tag), - SingleBranch: true, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: 0, - Progress: nil, - Tags: git.NoTags, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - commit, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Hash(), err) - } - return &Commit{commit}, fmt.Sprintf("%s/%s", c.tag, head.Hash().String()), nil -} - -type CheckoutCommit struct { - branch string - commit string -} - -func (c *CheckoutCommit) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git.PlainCloneContext(ctx, path, false, &git.CloneOptions{ - URL: url, - Auth: auth.AuthMethod, - RemoteName: common.DefaultOrigin, - ReferenceName: plumbing.NewBranchReferenceName(c.branch), - SingleBranch: true, - NoCheckout: false, - RecurseSubmodules: 0, - Progress: nil, - Tags: git.NoTags, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - w, err := repo.Worktree() - if err != nil { - return nil, "", fmt.Errorf("git worktree error: %w", err) - } - commit, err := repo.CommitObject(plumbing.NewHash(c.commit)) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", c.commit, err) - } - err = w.Checkout(&git.CheckoutOptions{ - Hash: commit.Hash, - Force: true, - }) - if err != nil { - return nil, "", fmt.Errorf("git checkout error: %w", err) - } - return 
&Commit{commit}, fmt.Sprintf("%s/%s", c.branch, commit.Hash.String()), nil -} - -type CheckoutSemVer struct { - semVer string -} - -func (c *CheckoutSemVer) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - verConstraint, err := semver.NewConstraint(c.semVer) - if err != nil { - return nil, "", fmt.Errorf("semver parse range error: %w", err) - } - - repo, err := git.PlainCloneContext(ctx, path, false, &git.CloneOptions{ - URL: url, - Auth: auth.AuthMethod, - RemoteName: common.DefaultOrigin, - NoCheckout: false, - Depth: 1, - RecurseSubmodules: 0, - Progress: nil, - Tags: git.AllTags, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - - repoTags, err := repo.Tags() - if err != nil { - return nil, "", fmt.Errorf("git list tags error: %w", err) - } - - tags := make(map[string]string) - tagTimestamps := make(map[string]time.Time) - _ = repoTags.ForEach(func(t *plumbing.Reference) error { - revision := plumbing.Revision(t.Name().String()) - hash, err := repo.ResolveRevision(revision) - if err != nil { - return fmt.Errorf("unable to resolve tag revision: %w", err) - } - commit, err := repo.CommitObject(*hash) - if err != nil { - return fmt.Errorf("unable to resolve commit of a tag revision: %w", err) - } - tagTimestamps[t.Name().Short()] = commit.Committer.When - - tags[t.Name().Short()] = t.Strings()[1] - return nil - }) - - var matchedVersions semver.Collection - for tag, _ := range tags { - v, err := version.ParseVersion(tag) - if err != nil { - continue - } - if !verConstraint.Check(v) { - continue - } - matchedVersions = append(matchedVersions, v) - } - if len(matchedVersions) == 0 { - return nil, "", fmt.Errorf("no match found for semver: %s", c.semVer) - } - - // Sort versions - sort.SliceStable(matchedVersions, func(i, j int) bool { - left := matchedVersions[i] - right := matchedVersions[j] - - if !left.Equal(right) { - return left.LessThan(right) - } - - // 
Having tag target timestamps at our disposal, we further try to sort - // versions into a chronological order. This is especially important for - // versions that differ only by build metadata, because it is not considered - // a part of the comparable version in Semver - return tagTimestamps[left.String()].Before(tagTimestamps[right.String()]) - }) - v := matchedVersions[len(matchedVersions)-1] - t := v.Original() - - w, err := repo.Worktree() - if err != nil { - return nil, "", fmt.Errorf("git worktree error: %w", err) - } - - err = w.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewTagReferenceName(t), - }) - if err != nil { - return nil, "", fmt.Errorf("git checkout error: %w", err) - } - - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - - commit, err := repo.CommitObject(head.Hash()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Hash(), err) - } - - return &Commit{commit}, fmt.Sprintf("%s/%s", t, head.Hash().String()), nil -} diff --git a/pkg/git/v1/checkout_test.go b/pkg/git/v1/checkout_test.go deleted file mode 100644 index df7781d7d..000000000 --- a/pkg/git/v1/checkout_test.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "context" - "io/ioutil" - "os" - "testing" - - "github.com/fluxcd/source-controller/pkg/git/common" -) - -func TestCheckoutTagSemVer_Checkout(t *testing.T) { - auth := &common.Auth{} - tag := CheckoutTag{ - tag: "v1.7.0", - } - tmpDir, _ := ioutil.TempDir("", "test") - defer os.RemoveAll(tmpDir) - - cTag, _, err := tag.Checkout(context.TODO(), tmpDir, "https://github.com/projectcontour/contour", auth) - if err != nil { - t.Error(err) - } - - semVer := CheckoutSemVer{ - semVer: ">=1.0.0 <=1.7.0", - } - tmpDir2, _ := ioutil.TempDir("", "test") - defer os.RemoveAll(tmpDir2) - - cSemVer, _, err := semVer.Checkout(context.TODO(), tmpDir2, "https://github.com/projectcontour/contour", auth) - if err != nil { - t.Error(err) - } - - if cTag.Hash() != cSemVer.Hash() { - t.Errorf("expected semver hash %s, got %s", cTag.Hash(), cSemVer.Hash()) - } -} diff --git a/pkg/git/v1/commit.go b/pkg/git/v1/commit.go deleted file mode 100644 index ac1c2b78e..000000000 --- a/pkg/git/v1/commit.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "fmt" - - "github.com/go-git/go-git/v5/plumbing/object" - corev1 "k8s.io/api/core/v1" -) - -type Commit struct { - commit *object.Commit -} - -func (c *Commit) Hash() string { - return c.commit.Hash.String() -} - -// Verify returns an error if the PGP signature can't be verified -func (c *Commit) Verify(secret corev1.Secret) error { - if c.commit.PGPSignature == "" { - return fmt.Errorf("no PGP signature found for commit: %s", c.commit.Hash) - } - - var verified bool - for _, bytes := range secret.Data { - if _, err := c.commit.Verify(string(bytes)); err == nil { - verified = true - break - } - } - if !verified { - return fmt.Errorf("PGP signature '%s' of '%s' can't be verified", c.commit.PGPSignature, c.commit.Author) - } - return nil -} diff --git a/pkg/git/v1/transport.go b/pkg/git/v1/transport.go deleted file mode 100644 index f8f64f5ae..000000000 --- a/pkg/git/v1/transport.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "fmt" - "net/url" - - "github.com/go-git/go-git/v5/plumbing/transport/http" - "github.com/go-git/go-git/v5/plumbing/transport/ssh" - corev1 "k8s.io/api/core/v1" - - "github.com/fluxcd/pkg/ssh/knownhosts" - "github.com/fluxcd/source-controller/pkg/git/common" -) - -func AuthSecretStrategyForURL(URL string) (common.AuthSecretStrategy, error) { - u, err := url.Parse(URL) - if err != nil { - return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) - } - - switch { - case u.Scheme == "http", u.Scheme == "https": - return &BasicAuth{}, nil - case u.Scheme == "ssh": - return &PublicKeyAuth{user: u.User.Username()}, nil - default: - return nil, fmt.Errorf("no auth secret strategy for scheme %s", u.Scheme) - } -} - -type BasicAuth struct{} - -func (s *BasicAuth) Method(secret corev1.Secret) (*common.Auth, error) { - auth := &http.BasicAuth{} - if username, ok := secret.Data["username"]; ok { - auth.Username = string(username) - } - if password, ok := secret.Data["password"]; ok { - auth.Password = string(password) - } - if auth.Username == "" || auth.Password == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - return &common.Auth{AuthMethod: auth}, nil -} - -type PublicKeyAuth struct { - user string -} - -func (s *PublicKeyAuth) Method(secret corev1.Secret) (*common.Auth, error) { - identity := secret.Data["identity"] - knownHosts := secret.Data["known_hosts"] - if len(identity) == 0 || len(knownHosts) == 0 { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'identity' and 'known_hosts'", secret.Name) - } - - user := s.user - if user == "" { - user = common.DefaultPublicKeyAuthUser - } - - pk, err := ssh.NewPublicKeys(user, identity, "") - if err != nil { - return nil, err - } - - callback, err := knownhosts.New(knownHosts) - if err != nil { - return nil, err - } - 
pk.HostKeyCallback = callback - return &common.Auth{AuthMethod: pk}, nil -} diff --git a/pkg/git/v1/transport_test.go b/pkg/git/v1/transport_test.go deleted file mode 100644 index 8e27033e5..000000000 --- a/pkg/git/v1/transport_test.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "reflect" - "testing" - - "github.com/go-git/go-git/v5/plumbing/transport/http" - corev1 "k8s.io/api/core/v1" - - "github.com/fluxcd/source-controller/pkg/git/common" -) - -const ( - // secretKeyFixture is a randomly generated password less - // 512bit RSA private key. 
- secretKeyFixture string = `-----BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQCrakELAKxozvwJijQEggYlTvS1QTZx1DaBwOhW/4kRSuR21plu -xuQeyuUiztoWeb9jgW7wjzG4j1PIJjdbsgjPIcIZ4PBY7JeEW+QRopfwuN8MHXNp -uTLgIHbkmhoOg5qBEcjzO/lEOOPpV0EmbObgqv3+wRmLJrgfzWl/cTtRewIDAQAB -AoGAawKFImpEN5Xn78iwWpQVZBsbV0AjzgHuGSiloxIZrorzf2DPHkHZzYNaclVx -/o/4tBTsfg7WumH3qr541qyZJDgU7iRMABwmx0v1vm2wQiX7NJzLzH2E9vlMC3mw -d8S99g9EqRuNH98XX8su34B9WGRPqiKvEm0RW8Hideo2/KkCQQDbs6rHcriKQyPB -paidHZAfguu0eVbyHT2EgLgRboWE+tEAqFEW2ycqNL3VPz9fRvwexbB6rpOcPpQJ -DEL4XB2XAkEAx7xJz8YlCQ2H38xggK8R8EUXF9Zhb0fqMJHMNmao1HCHVMtbsa8I -jR2EGyQ4CaIqNG5tdWukXQSJrPYDRWNvvQJAZX3rP7XUYDLB2twvN12HzbbKMhX3 -v2MYnxRjc9INpi/Dyzz2MMvOnOW+aDuOh/If2AtVCmeJUx1pf4CFk3viQwJBAKyC -t824+evjv+NQBlme3AOF6PgxtV4D4wWoJ5Uk/dTejER0j/Hbl6sqPxuiILRRV9qJ -Ngkgu4mLjc3RfenEhJECQAx8zjWUE6kHHPGAd9DfiAIQ4bChqnyS0Nwb9+Gd4hSE -P0Ah10mHiK/M0o3T8Eanwum0gbQHPnOwqZgsPkwXRqQ= ------END RSA PRIVATE KEY-----` - - // knownHostsFixture is known_hosts fixture in the expected - // format. 
- knownHostsFixture string = `github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==` -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("git"), - "password": []byte("password"), - }, - } - privateKeySecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "identity": []byte(secretKeyFixture), - "known_hosts": []byte(knownHostsFixture), - }, - } -) - -func TestAuthSecretStrategyForURL(t *testing.T) { - tests := []struct { - name string - url string - want common.AuthSecretStrategy - wantErr bool - }{ - {"HTTP", "http://git.example.com/org/repo.git", &BasicAuth{}, false}, - {"HTTPS", "https://git.example.com/org/repo.git", &BasicAuth{}, false}, - {"SSH", "ssh://git.example.com:2222/org/repo.git", &PublicKeyAuth{}, false}, - {"SSH with username", "ssh://example@git.example.com:2222/org/repo.git", &PublicKeyAuth{user: "example"}, false}, - {"unsupported", "protocol://example.com", nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := AuthSecretStrategyForURL(tt.url) - if (err != nil) != tt.wantErr { - t.Errorf("AuthSecretStrategyForURL() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("AuthSecretStrategyForURL() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestBasicAuthStrategy_Method(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - want *common.Auth - wantErr bool - }{ - {"username and password", basicAuthSecretFixture, nil, &common.Auth{AuthMethod: &http.BasicAuth{Username: "git", Password: 
"password"}}, false}, - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, nil, true}, - {"without password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, nil, true}, - {"empty", corev1.Secret{}, nil, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - s := &BasicAuth{} - got, err := s.Method(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("Method() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Method() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestPublicKeyStrategy_Method(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - }{ - {"private key and known_hosts", privateKeySecretFixture, nil, false}, - {"missing private key", privateKeySecretFixture, func(s *corev1.Secret) { delete(s.Data, "identity") }, true}, - {"invalid private key", privateKeySecretFixture, func(s *corev1.Secret) { s.Data["identity"] = []byte(`-----BEGIN RSA PRIVATE KEY-----`) }, true}, - {"missing known_hosts", privateKeySecretFixture, func(s *corev1.Secret) { delete(s.Data, "known_hosts") }, true}, - {"invalid known_hosts", privateKeySecretFixture, func(s *corev1.Secret) { s.Data["known_hosts"] = []byte(`invalid`) }, true}, - {"empty", corev1.Secret{}, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - s := &PublicKeyAuth{} - _, err := s.Method(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("Method() error = %v, wantErr %v", err, tt.wantErr) - return - } - }) - } -} diff --git a/pkg/git/v2/checkout.go b/pkg/git/v2/checkout.go deleted file mode 100644 index 78dc7c175..000000000 --- a/pkg/git/v2/checkout.go +++ /dev/null @@ 
-1,221 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "context" - "fmt" - - "github.com/blang/semver/v4" - git2go "github.com/libgit2/git2go/v31" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" - "github.com/fluxcd/source-controller/pkg/git/common" -) - -func CheckoutStrategyForRef(ref *sourcev1.GitRepositoryRef) common.CheckoutStrategy { - switch { - case ref == nil: - return &CheckoutBranch{branch: common.DefaultBranch} - case ref.SemVer != "": - return &CheckoutSemVer{semVer: ref.SemVer} - case ref.Tag != "": - return &CheckoutTag{tag: ref.Tag} - case ref.Commit != "": - strategy := &CheckoutCommit{branch: ref.Branch, commit: ref.Commit} - if strategy.branch == "" { - strategy.branch = common.DefaultBranch - } - return strategy - case ref.Branch != "": - return &CheckoutBranch{branch: ref.Branch} - default: - return &CheckoutBranch{branch: common.DefaultBranch} - } -} - -type CheckoutBranch struct { - branch string -} - -func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git2go.Clone(url, path, &git2go.CloneOptions{ - FetchOptions: &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsNone, - RemoteCallbacks: git2go.RemoteCallbacks{ - CredentialsCallback: auth.CredCallback, - CertificateCheckCallback: auth.CertCallback, - }, - }, - CheckoutBranch: 
c.branch, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - commit, err := repo.LookupCommit(head.Target()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Target(), err) - } - return &Commit{commit}, fmt.Sprintf("%s/%s", c.branch, head.Target().String()), nil -} - -type CheckoutTag struct { - tag string -} - -func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git2go.Clone(url, path, &git2go.CloneOptions{ - FetchOptions: &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsAll, - RemoteCallbacks: git2go.RemoteCallbacks{ - CredentialsCallback: auth.CredCallback, - CertificateCheckCallback: auth.CertCallback, - }, - }, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - ref, err := repo.References.Dwim(c.tag) - if err != nil { - return nil, "", fmt.Errorf("unable to find tag '%s': %w", c.tag, err) - } - err = repo.SetHeadDetached(ref.Target()) - if err != nil { - return nil, "", fmt.Errorf("git checkout error: %w", err) - } - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - commit, err := repo.LookupCommit(head.Target()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Target(), err) - } - return &Commit{commit}, fmt.Sprintf("%s/%s", c.tag, head.Target().String()), nil -} - -type CheckoutCommit struct { - branch string - commit string -} - -func (c *CheckoutCommit) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - repo, err := git2go.Clone(url, path, &git2go.CloneOptions{ - FetchOptions: &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsNone, - RemoteCallbacks: 
git2go.RemoteCallbacks{ - CredentialsCallback: auth.CredCallback, - CertificateCheckCallback: auth.CertCallback, - }, - }, - CheckoutBranch: c.branch, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - oid, err := git2go.NewOid(c.commit) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' could not be parsed", c.commit) - } - commit, err := repo.LookupCommit(oid) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", c.commit, err) - } - tree, err := repo.LookupTree(commit.TreeId()) - if err != nil { - return nil, "", fmt.Errorf("git worktree error: %w", err) - } - err = repo.CheckoutTree(tree, &git2go.CheckoutOpts{ - Strategy: git2go.CheckoutForce, - }) - if err != nil { - return nil, "", fmt.Errorf("git checkout error: %w", err) - } - - return &Commit{commit}, fmt.Sprintf("%s/%s", c.branch, commit.Id().String()), nil -} - -type CheckoutSemVer struct { - semVer string -} - -func (c *CheckoutSemVer) Checkout(ctx context.Context, path, url string, auth *common.Auth) (common.Commit, string, error) { - rng, err := semver.ParseRange(c.semVer) - if err != nil { - return nil, "", fmt.Errorf("semver parse range error: %w", err) - } - - repo, err := git2go.Clone(url, path, &git2go.CloneOptions{ - FetchOptions: &git2go.FetchOptions{ - DownloadTags: git2go.DownloadTagsAll, - RemoteCallbacks: git2go.RemoteCallbacks{ - CredentialsCallback: auth.CredCallback, - CertificateCheckCallback: auth.CertCallback, - }, - }, - }) - if err != nil { - return nil, "", fmt.Errorf("unable to clone '%s', error: %w", url, err) - } - - repoTags, err := repo.Tags.List() - if err != nil { - return nil, "", fmt.Errorf("git list tags error: %w", err) - } - - svTags := make(map[string]string) - var svers []semver.Version - for _, tag := range repoTags { - v, _ := semver.ParseTolerant(tag) - if rng(v) { - svers = append(svers, v) - svTags[v.String()] = tag - } - } - - if len(svers) == 0 { - return nil, "", 
fmt.Errorf("no match found for semver: %s", c.semVer) - } - - semver.Sort(svers) - v := svers[len(svers)-1] - t := svTags[v.String()] - - ref, err := repo.References.Dwim(t) - if err != nil { - return nil, "", fmt.Errorf("unable to find tag '%s': %w", t, err) - } - err = repo.SetHeadDetached(ref.Target()) - if err != nil { - return nil, "", fmt.Errorf("git checkout error: %w", err) - } - head, err := repo.Head() - if err != nil { - return nil, "", fmt.Errorf("git resolve HEAD error: %w", err) - } - commit, err := repo.LookupCommit(head.Target()) - if err != nil { - return nil, "", fmt.Errorf("git commit '%s' not found: %w", head.Target().String(), err) - } - - return &Commit{commit}, fmt.Sprintf("%s/%s", t, head.Target().String()), nil -} diff --git a/pkg/git/v2/checkout_test.go b/pkg/git/v2/checkout_test.go deleted file mode 100644 index fd4494b6c..000000000 --- a/pkg/git/v2/checkout_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2 - -import ( - "context" - "io/ioutil" - "os" - "testing" - - "github.com/fluxcd/source-controller/pkg/git/common" - git2go "github.com/libgit2/git2go/v31" -) - -func TestCheckoutTagSemVer_Checkout(t *testing.T) { - certCallback := func(cert *git2go.Certificate, valid bool, hostname string) git2go.ErrorCode { - return 0 - } - auth := &common.Auth{CertCallback: certCallback} - - tag := CheckoutTag{ - tag: "v1.7.0", - } - tmpDir, _ := ioutil.TempDir("", "test") - defer os.RemoveAll(tmpDir) - - cTag, _, err := tag.Checkout(context.TODO(), tmpDir, "https://github.com/projectcontour/contour", auth) - if err != nil { - t.Error(err) - } - - semVer := CheckoutSemVer{ - semVer: ">=1.0.0 <=1.7.0", - } - tmpDir2, _ := ioutil.TempDir("", "test") - defer os.RemoveAll(tmpDir2) - - cSemVer, _, err := semVer.Checkout(context.TODO(), tmpDir2, "https://github.com/projectcontour/contour", auth) - if err != nil { - t.Error(err) - } - - if cTag.Hash() != cSemVer.Hash() { - t.Errorf("expected semver hash %s, got %s", cTag.Hash(), cSemVer.Hash()) - } -} diff --git a/pkg/git/v2/commit.go b/pkg/git/v2/commit.go deleted file mode 100644 index 62c7c6d8f..000000000 --- a/pkg/git/v2/commit.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2 - -import ( - "bytes" - "fmt" - "golang.org/x/crypto/openpgp" - "strings" - - git2go "github.com/libgit2/git2go/v31" - corev1 "k8s.io/api/core/v1" -) - -type Commit struct { - commit *git2go.Commit -} - -func (c *Commit) Hash() string { - return c.commit.Id().String() -} - -// Verify returns an error if the PGP signature can't be verified -func (c *Commit) Verify(secret corev1.Secret) error { - signature, signedData, err := c.commit.ExtractSignature() - if err != nil { - return err - } - - var verified bool - for _, b := range secret.Data { - keyRingReader := strings.NewReader(string(b)) - keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) - if err != nil { - return err - } - - _, err = openpgp.CheckArmoredDetachedSignature(keyring, strings.NewReader(signedData), bytes.NewBufferString(signature)) - if err == nil { - verified = true - break - } - } - - if !verified { - return fmt.Errorf("PGP signature '%s' of '%s' can't be verified", signature, c.commit.Committer().Email) - } - - return nil -} diff --git a/pkg/git/v2/transport.go b/pkg/git/v2/transport.go deleted file mode 100644 index 992e4d94a..000000000 --- a/pkg/git/v2/transport.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2 - -import ( - "bufio" - "bytes" - "crypto/sha1" - "fmt" - "golang.org/x/crypto/ssh" - "net/url" - "strings" - - "github.com/fluxcd/source-controller/pkg/git/common" - git2go "github.com/libgit2/git2go/v31" - corev1 "k8s.io/api/core/v1" -) - -func AuthSecretStrategyForURL(URL string) (common.AuthSecretStrategy, error) { - u, err := url.Parse(URL) - if err != nil { - return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) - } - - switch { - case u.Scheme == "http", u.Scheme == "https": - return &BasicAuth{}, nil - case u.Scheme == "ssh": - return &PublicKeyAuth{user: u.User.Username()}, nil - default: - return nil, fmt.Errorf("no auth secret strategy for scheme %s", u.Scheme) - } -} - -type BasicAuth struct{} - -func (s *BasicAuth) Method(secret corev1.Secret) (*common.Auth, error) { - var username string - if d, ok := secret.Data["username"]; ok { - username = string(d) - } - var password string - if d, ok := secret.Data["password"]; ok { - password = string(d) - } - if username == "" || password == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - - credCallback := func(url string, username_from_url string, allowed_types git2go.CredType) (*git2go.Cred, error) { - cred, err := git2go.NewCredUserpassPlaintext(username, password) - if err != nil { - return nil, err - } - return cred, nil - } - - return &common.Auth{CredCallback: credCallback, CertCallback: nil}, nil -} - -type PublicKeyAuth struct { - user string -} - -func (s *PublicKeyAuth) Method(secret corev1.Secret) (*common.Auth, error) { - identity := secret.Data["identity"] - knownHosts := secret.Data["known_hosts"] - if len(identity) == 0 || len(knownHosts) == 0 { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'identity' and 'known_hosts'", secret.Name) - } - - kk, err := parseKnownHosts(string(knownHosts)) - if err != nil { - return nil, err - } - 
- // Need to validate private key as it is not - // done by git2go when loading the key - _, err = ssh.ParsePrivateKey(identity) - if err != nil { - return nil, err - } - - user := s.user - if user == "" { - user = common.DefaultPublicKeyAuthUser - } - - credCallback := func(url string, username_from_url string, allowed_types git2go.CredType) (*git2go.Cred, error) { - cred, err := git2go.NewCredSshKeyFromMemory(user, "", string(identity), "") - if err != nil { - return nil, err - } - return cred, nil - } - certCallback := func(cert *git2go.Certificate, valid bool, hostname string) git2go.ErrorCode { - for _, k := range kk { - if k.matches(hostname, cert.Hostkey.HashSHA1[:]) { - return git2go.ErrOk - } - } - return git2go.ErrGeneric - } - - return &common.Auth{CredCallback: credCallback, CertCallback: certCallback}, nil -} - -type knownKey struct { - hosts []string - key ssh.PublicKey -} - -func parseKnownHosts(s string) ([]knownKey, error) { - knownHosts := []knownKey{} - scanner := bufio.NewScanner(strings.NewReader(s)) - for scanner.Scan() { - _, hosts, pubKey, _, _, err := ssh.ParseKnownHosts(scanner.Bytes()) - if err != nil { - return []knownKey{}, err - } - - knownHost := knownKey{ - hosts: hosts, - key: pubKey, - } - knownHosts = append(knownHosts, knownHost) - } - - if err := scanner.Err(); err != nil { - return []knownKey{}, err - } - - return knownHosts, nil -} - -func (k knownKey) matches(host string, key []byte) bool { - if !containsHost(k.hosts, host) { - return false - } - - hash := sha1.Sum([]byte(k.key.Marshal())) - if bytes.Compare(hash[:], key) != 0 { - return false - } - - return true -} - -func containsHost(hosts []string, host string) bool { - for _, h := range hosts { - if h == host { - return true - } - } - - return false -} diff --git a/pkg/git/v2/transport_test.go b/pkg/git/v2/transport_test.go deleted file mode 100644 index 8428229ea..000000000 --- a/pkg/git/v2/transport_test.go +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright 2020 The Flux 
authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "reflect" - "testing" - - corev1 "k8s.io/api/core/v1" - - "github.com/fluxcd/source-controller/pkg/git/common" -) - -const ( - // secretKeyFixture is a randomly generated password less - // 512bit RSA private key. - secretKeyFixture string = `-----BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQCrakELAKxozvwJijQEggYlTvS1QTZx1DaBwOhW/4kRSuR21plu -xuQeyuUiztoWeb9jgW7wjzG4j1PIJjdbsgjPIcIZ4PBY7JeEW+QRopfwuN8MHXNp -uTLgIHbkmhoOg5qBEcjzO/lEOOPpV0EmbObgqv3+wRmLJrgfzWl/cTtRewIDAQAB -AoGAawKFImpEN5Xn78iwWpQVZBsbV0AjzgHuGSiloxIZrorzf2DPHkHZzYNaclVx -/o/4tBTsfg7WumH3qr541qyZJDgU7iRMABwmx0v1vm2wQiX7NJzLzH2E9vlMC3mw -d8S99g9EqRuNH98XX8su34B9WGRPqiKvEm0RW8Hideo2/KkCQQDbs6rHcriKQyPB -paidHZAfguu0eVbyHT2EgLgRboWE+tEAqFEW2ycqNL3VPz9fRvwexbB6rpOcPpQJ -DEL4XB2XAkEAx7xJz8YlCQ2H38xggK8R8EUXF9Zhb0fqMJHMNmao1HCHVMtbsa8I -jR2EGyQ4CaIqNG5tdWukXQSJrPYDRWNvvQJAZX3rP7XUYDLB2twvN12HzbbKMhX3 -v2MYnxRjc9INpi/Dyzz2MMvOnOW+aDuOh/If2AtVCmeJUx1pf4CFk3viQwJBAKyC -t824+evjv+NQBlme3AOF6PgxtV4D4wWoJ5Uk/dTejER0j/Hbl6sqPxuiILRRV9qJ -Ngkgu4mLjc3RfenEhJECQAx8zjWUE6kHHPGAd9DfiAIQ4bChqnyS0Nwb9+Gd4hSE -P0Ah10mHiK/M0o3T8Eanwum0gbQHPnOwqZgsPkwXRqQ= ------END RSA PRIVATE KEY-----` - - // knownHostsFixture is known_hosts fixture in the expected - // format. 
- knownHostsFixture string = `github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==` -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("git"), - "password": []byte("password"), - }, - } - privateKeySecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "identity": []byte(secretKeyFixture), - "known_hosts": []byte(knownHostsFixture), - }, - } -) - -func TestAuthSecretStrategyForURL(t *testing.T) { - tests := []struct { - name string - url string - want common.AuthSecretStrategy - wantErr bool - }{ - {"HTTP", "http://git.example.com/org/repo.git", &BasicAuth{}, false}, - {"HTTPS", "https://git.example.com/org/repo.git", &BasicAuth{}, false}, - {"SSH", "ssh://git.example.com:2222/org/repo.git", &PublicKeyAuth{}, false}, - {"SSH with username", "ssh://example@git.example.com:2222/org/repo.git", &PublicKeyAuth{user: "example"}, false}, - {"unsupported", "protocol://example.com", nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := AuthSecretStrategyForURL(tt.url) - if (err != nil) != tt.wantErr { - t.Errorf("AuthSecretStrategyForURL() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("AuthSecretStrategyForURL() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestBasicAuthStrategy_Method(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - want *common.Auth - wantErr bool - }{ - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, nil, true}, - {"without 
password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, nil, true}, - {"empty", corev1.Secret{}, nil, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - s := &BasicAuth{} - got, err := s.Method(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("Method() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Method() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestPublicKeyStrategy_Method(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - }{ - {"private key and known_hosts", privateKeySecretFixture, nil, false}, - {"missing private key", privateKeySecretFixture, func(s *corev1.Secret) { delete(s.Data, "identity") }, true}, - {"invalid private key", privateKeySecretFixture, func(s *corev1.Secret) { s.Data["identity"] = []byte(`-----BEGIN RSA PRIVATE KEY-----`) }, true}, - {"missing known_hosts", privateKeySecretFixture, func(s *corev1.Secret) { delete(s.Data, "known_hosts") }, true}, - {"invalid known_hosts", privateKeySecretFixture, func(s *corev1.Secret) { s.Data["known_hosts"] = []byte(`invalid`) }, true}, - {"empty", corev1.Secret{}, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - s := &PublicKeyAuth{} - _, err := s.Method(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("Method() error = %v, wantErr %v", err, tt.wantErr) - return - } - }) - } -} diff --git a/tests/fuzz/Dockerfile.builder b/tests/fuzz/Dockerfile.builder new file mode 100644 index 000000000..0b45115bb --- /dev/null +++ b/tests/fuzz/Dockerfile.builder @@ -0,0 +1,15 @@ +FROM gcr.io/oss-fuzz-base/base-builder-go + +RUN wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz \ + && mkdir 
temp-go \ + && rm -rf /root/.go/* \ + && tar -C temp-go/ -xzf go1.24.0.linux-amd64.tar.gz \ + && mv temp-go/go/* /root/.go/ + +ENV SRC=$GOPATH/src/github.com/fluxcd/source-controller +ENV FLUX_CI=true + +COPY ./ $SRC +RUN wget https://raw.githubusercontent.com/google/oss-fuzz/master/projects/fluxcd/build.sh -O $SRC/build.sh + +WORKDIR $SRC diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md new file mode 100644 index 000000000..2ae2cddb6 --- /dev/null +++ b/tests/fuzz/README.md @@ -0,0 +1,82 @@ +# fuzz testing + +Flux is part of Google's [oss fuzz] program which provides continuous fuzzing for +open source projects. + +The long running fuzzing execution is configured in the [oss-fuzz repository]. +Shorter executions are done on a per-PR basis, configured as a [github workflow]. + +### Testing locally + +Build fuzzers: + +```bash +make fuzz-build +``` +All fuzzers will be built into `./build/fuzz/out`. + +Smoke test fuzzers: + +All the fuzzers will be built and executed once, to ensure they are fully functional. + +```bash +make fuzz-smoketest +``` + +Run fuzzer locally: +```bash +./build/fuzz/out/fuzz_conditions_match +``` + +Run fuzzer inside a container: + +```bash + docker run --rm -ti \ + -v "$(pwd)/build/fuzz/out":/out \ + gcr.io/oss-fuzz/fluxcd \ + /out/fuzz_conditions_match +``` + +### Caveats of creating oss-fuzz compatible tests + +#### Segregate fuzz tests + +OSS-Fuzz does not properly support mixed `*_test.go` files, in which there is a combination +of fuzz and non-fuzz tests. To mitigate this problem, ensure your fuzz tests are not in the +same file as other Go tests. As a pattern, call your fuzz test files `*_fuzz_test.go`. + +#### Build tags to avoid conflicts when running Go tests + +Due to the issue above, code duplication will occur when creating fuzz tests that rely on +helper functions that are shared with other tests. 
To avoid build issues, add a conditional +build tag at the top of the `*_fuzz_test.go` file: +```go +//go:build gofuzz_libfuzzer +// +build gofuzz_libfuzzer +``` + +The build tag above is set at [go-118-fuzz-build]. +At this point in time we can't pass on specific tags from [compile_native_go_fuzzer]. + +### Running oss-fuzz locally + +The `make fuzz-smoketest` is meant to be an easy way to reproduce errors that may occur +upstream. If our checks ever run out of sync with upstream, the upstream tests can be +executed locally with: + +``` +git clone --depth 1 https://github.com/google/oss-fuzz +cd oss-fuzz +python infra/helper.py build_image fluxcd +python infra/helper.py build_fuzzers --sanitizer address --architecture x86_64 fluxcd +python infra/helper.py check_build --sanitizer address --architecture x86_64 fluxcd +``` + +For latest info on testing oss-fuzz locally, refer to the [upstream guide]. + +[oss fuzz]: https://github.com/google/oss-fuzz +[oss-fuzz repository]: https://github.com/google/oss-fuzz/tree/master/projects/fluxcd +[github workflow]: .github/workflows/cifuzz.yaml +[upstream guide]: https://google.github.io/oss-fuzz/getting-started/new-project-guide/#testing-locally +[go-118-fuzz-build]: https://github.com/AdamKorcz/go-118-fuzz-build/blob/b2031950a318d4f2dcf3ec3e128f904d5cf84623/main.go#L40 +[compile_native_go_fuzzer]: https://github.com/google/oss-fuzz/blob/c2d827cb78529fdc757c9b0b4fea0f1238a54814/infra/base-images/base-builder/compile_native_go_fuzzer#L32 diff --git a/tests/fuzz/native_go_run.sh b/tests/fuzz/native_go_run.sh new file mode 100755 index 000000000..a62410273 --- /dev/null +++ b/tests/fuzz/native_go_run.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Flux authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +# This script iterates through all go fuzzing targets, running each one +# through the period of time established by FUZZ_TIME. + +FUZZ_TIME=${FUZZ_TIME:-"5s"} + +# controllers_fuzzer_test.go is not fully compatible with Go native fuzz, +# so it is ignored here. +test_files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' . | \ + grep -v "controllers_fuzzer_test.go") + +for file in ${test_files} +do + targets=$(grep -oP 'func \K(Fuzz\w*)' "${file}") + for target_name in ${targets} + do + echo "Running ${file}.${target_name} for ${FUZZ_TIME}." + file_dir=$(dirname "${file}") + + go test -fuzz="^${target_name}\$" -fuzztime "${FUZZ_TIME}" "${file_dir}" + done +done diff --git a/tests/fuzz/oss_fuzz_prebuild.sh b/tests/fuzz/oss_fuzz_prebuild.sh new file mode 100755 index 000000000..18617939e --- /dev/null +++ b/tests/fuzz/oss_fuzz_prebuild.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Flux authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -euxo pipefail + +# This file is executed by upstream oss-fuzz for any requirements that +# are specific for building this project. + +# Some tests requires embedded resources. Embedding does not allow +# for traversing into ascending dirs, therefore we copy those contents here: +mkdir -p internal/controller/testdata/crd +cp config/crd/bases/*.yaml internal/controller/testdata/crd/ diff --git a/tests/fuzz/oss_fuzz_run.sh b/tests/fuzz/oss_fuzz_run.sh new file mode 100755 index 000000000..12912e51a --- /dev/null +++ b/tests/fuzz/oss_fuzz_run.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Flux authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +# run each fuzzer once to ensure they are working properly +find /out -type f -iname "fuzz*" -exec echo {} -runs=1 \; | bash -e diff --git a/tests/listener/listener.go b/tests/listener/listener.go new file mode 100644 index 000000000..289b2adf0 --- /dev/null +++ b/tests/listener/listener.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testlistener + +import ( + "net" + "strconv" + "strings" + "testing" + + . "github.com/onsi/gomega" +) + +// New creates a TCP listener on a random port and returns +// the listener, the address and the port of this listener. +// It also registers a cleanup function to close the listener +// when the test ends. +func New(t *testing.T) (net.Listener, string, int) { + t.Helper() + + lis, err := net.Listen("tcp", "localhost:0") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { lis.Close() }) + + addr := lis.Addr().String() + addrParts := strings.Split(addr, ":") + portStr := addrParts[len(addrParts)-1] + port, err := strconv.Atoi(portStr) + g.Expect(err).NotTo(HaveOccurred()) + + return lis, addr, port +} diff --git a/tests/proxy/proxy.go b/tests/proxy/proxy.go new file mode 100644 index 000000000..33fadece4 --- /dev/null +++ b/tests/proxy/proxy.go @@ -0,0 +1,48 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testproxy + +import ( + "net/http" + "testing" + + "github.com/elazarl/goproxy" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +// New creates a new goproxy server on a random port and returns +// the address and the port of this server. 
It also registers a +// cleanup function to close the server and the listener when +// the test ends. +func New(t *testing.T) (string, int) { + t.Helper() + + lis, addr, port := testlistener.New(t) + + handler := goproxy.NewProxyHttpServer() + handler.Verbose = true + + server := &http.Server{ + Addr: addr, + Handler: handler, + } + go server.Serve(lis) + t.Cleanup(func() { server.Close() }) + + return addr, port +} diff --git a/tests/registry/registry.go b/tests/registry/registry.go new file mode 100644 index 000000000..28b36fd20 --- /dev/null +++ b/tests/registry/registry.go @@ -0,0 +1,124 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testregistry + +import ( + "context" + "fmt" + "io" + "net/url" + "strings" + "testing" + "time" + + "github.com/distribution/distribution/v3/configuration" + "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + "github.com/google/go-containerregistry/pkg/crane" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + . 
"github.com/onsi/gomega" + "github.com/sirupsen/logrus" + + "github.com/fluxcd/pkg/oci" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +func New(t *testing.T) string { + t.Helper() + + // Get a free random port and release it so the registry can use it. + listener, addr, _ := testlistener.New(t) + err := listener.Close() + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + + config := &configuration.Configuration{} + config.HTTP.Addr = addr + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Log.AccessLog.Disabled = true + config.Log.Level = "error" + logrus.SetOutput(io.Discard) + + r, err := registry.NewRegistry(context.Background(), config) + g.Expect(err).NotTo(HaveOccurred()) + + go r.ListenAndServe() + + return addr +} + +type PodinfoImage struct { + URL string + Tag string + Digest gcrv1.Hash +} + +func CreatePodinfoImageFromTar(tarFilePath, tag, registryURL string, opts ...crane.Option) (*PodinfoImage, error) { + // Create Image + image, err := crane.Load(tarFilePath) + if err != nil { + return nil, err + } + + image = setPodinfoImageAnnotations(image, tag) + + // url.Parse doesn't handle urls with no scheme well e.g localhost: + if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { + registryURL = fmt.Sprintf("http://%s", registryURL) + } + + myURL, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) + + // Image digest + podinfoImageDigest, err := image.Digest() + if err != nil { + return nil, err + } + + // Push image + err = crane.Push(image, repositoryURL, opts...) + if err != nil { + return nil, err + } + + // Tag the image + err = crane.Tag(repositoryURL, tag, opts...) 
+ if err != nil { + return nil, err + } + + return &PodinfoImage{ + URL: "oci://" + repositoryURL, + Tag: tag, + Digest: podinfoImageDigest, + }, nil +} + +func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { + metadata := map[string]string{ + oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: fmt.Sprintf("%s@sha1:b3b00fe35424a45d373bf4c7214178bc36fd7872", tag), + } + return mutate.Annotations(img, metadata).(gcrv1.Image) +}